path: root/net/bluetooth
Diffstat (limited to 'net/bluetooth')
-rw-r--r--net/bluetooth/6lowpan.c865
-rw-r--r--net/bluetooth/6lowpan.h47
-rw-r--r--net/bluetooth/Kconfig46
-rw-r--r--net/bluetooth/Makefile9
-rw-r--r--net/bluetooth/a2mp.c1023
-rw-r--r--net/bluetooth/a2mp.h150
-rw-r--r--net/bluetooth/af_bluetooth.c324
-rw-r--r--net/bluetooth/amp.c468
-rw-r--r--net/bluetooth/amp.h54
-rw-r--r--net/bluetooth/bnep/Kconfig2
-rw-r--r--net/bluetooth/bnep/bnep.h152
-rw-r--r--net/bluetooth/bnep/core.c155
-rw-r--r--net/bluetooth/bnep/netdev.c20
-rw-r--r--net/bluetooth/bnep/sock.c56
-rw-r--r--net/bluetooth/cmtp/Kconfig2
-rw-r--r--net/bluetooth/cmtp/capi.c15
-rw-r--r--net/bluetooth/cmtp/cmtp.h10
-rw-r--r--net/bluetooth/cmtp/core.c82
-rw-r--r--net/bluetooth/cmtp/sock.c42
-rw-r--r--net/bluetooth/hci_conn.c947
-rw-r--r--net/bluetooth/hci_core.c4610
-rw-r--r--net/bluetooth/hci_event.c3295
-rw-r--r--net/bluetooth/hci_sock.c824
-rw-r--r--net/bluetooth/hci_sysfs.c432
-rw-r--r--net/bluetooth/hidp/Kconfig2
-rw-r--r--net/bluetooth/hidp/core.c1324
-rw-r--r--net/bluetooth/hidp/hidp.h81
-rw-r--r--net/bluetooth/hidp/sock.c77
-rw-r--r--net/bluetooth/l2cap.c4930
-rw-r--r--net/bluetooth/l2cap_core.c7552
-rw-r--r--net/bluetooth/l2cap_sock.c1618
-rw-r--r--net/bluetooth/lib.c62
-rw-r--r--net/bluetooth/mgmt.c6033
-rw-r--r--net/bluetooth/rfcomm/Kconfig3
-rw-r--r--net/bluetooth/rfcomm/core.c427
-rw-r--r--net/bluetooth/rfcomm/sock.c213
-rw-r--r--net/bluetooth/rfcomm/tty.c578
-rw-r--r--net/bluetooth/sco.c493
-rw-r--r--net/bluetooth/smp.c1406
-rw-r--r--net/bluetooth/smp.h132
40 files changed, 30077 insertions, 8484 deletions
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
new file mode 100644
index 00000000000..8796ffa08b4
--- /dev/null
+++ b/net/bluetooth/6lowpan.c
@@ -0,0 +1,865 @@
+/*
+ Copyright (c) 2013 Intel Corp.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 and
+ only version 2 as published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+*/
+
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/addrconf.h>
+
+#include <net/af_ieee802154.h> /* to get the address type */
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/l2cap.h>
+
+#include "6lowpan.h"
+
+#include <net/6lowpan.h> /* for the compression support */
+
+#define IFACE_NAME_TEMPLATE "bt%d"
+#define EUI64_ADDR_LEN 8
+
+struct skb_cb {
+ struct in6_addr addr;
+ struct l2cap_conn *conn;
+};
+#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))
+
+/* The devices list contains those devices for which we are acting
+ * as a proxy. The BT 6LoWPAN device is a virtual device that
+ * connects to the Bluetooth LE device. The real connection to the
+ * BT device is done via the l2cap layer. There is one virtual
+ * device per BT 6LoWPAN network (i.e. per hciX device).
+ * The list contains struct lowpan_dev elements.
+ */
+static LIST_HEAD(bt_6lowpan_devices);
+static DEFINE_RWLOCK(devices_lock);
+
+struct lowpan_peer {
+ struct list_head list;
+ struct l2cap_conn *conn;
+
+ /* peer addresses in various formats */
+ unsigned char eui64_addr[EUI64_ADDR_LEN];
+ struct in6_addr peer_addr;
+};
+
+struct lowpan_dev {
+ struct list_head list;
+
+ struct hci_dev *hdev;
+ struct net_device *netdev;
+ struct list_head peers;
+ atomic_t peer_count; /* number of items in peers list */
+
+ struct work_struct delete_netdev;
+ struct delayed_work notify_peers;
+};
+
+static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev)
+{
+ return netdev_priv(netdev);
+}
+
+static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer)
+{
+ list_add(&peer->list, &dev->peers);
+ atomic_inc(&dev->peer_count);
+}
+
+static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer)
+{
+ list_del(&peer->list);
+
+ if (atomic_dec_and_test(&dev->peer_count)) {
+ BT_DBG("last peer");
+ return true;
+ }
+
+ return false;
+}
+
+static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev,
+ bdaddr_t *ba, __u8 type)
+{
+ struct lowpan_peer *peer, *tmp;
+
+ BT_DBG("peers %d addr %pMR type %d", atomic_read(&dev->peer_count),
+ ba, type);
+
+ list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
+ BT_DBG("addr %pMR type %d",
+ &peer->conn->hcon->dst, peer->conn->hcon->dst_type);
+
+ if (bacmp(&peer->conn->hcon->dst, ba))
+ continue;
+
+ if (type == peer->conn->hcon->dst_type)
+ return peer;
+ }
+
+ return NULL;
+}
+
+static inline struct lowpan_peer *peer_lookup_conn(struct lowpan_dev *dev,
+ struct l2cap_conn *conn)
+{
+ struct lowpan_peer *peer, *tmp;
+
+ list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
+ if (peer->conn == conn)
+ return peer;
+ }
+
+ return NULL;
+}
+
+static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
+{
+ struct lowpan_dev *entry, *tmp;
+ struct lowpan_peer *peer = NULL;
+ unsigned long flags;
+
+ read_lock_irqsave(&devices_lock, flags);
+
+ list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
+ peer = peer_lookup_conn(entry, conn);
+ if (peer)
+ break;
+ }
+
+ read_unlock_irqrestore(&devices_lock, flags);
+
+ return peer;
+}
+
+static struct lowpan_dev *lookup_dev(struct l2cap_conn *conn)
+{
+ struct lowpan_dev *entry, *tmp;
+ struct lowpan_dev *dev = NULL;
+ unsigned long flags;
+
+ read_lock_irqsave(&devices_lock, flags);
+
+ list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
+ if (conn->hcon->hdev == entry->hdev) {
+ dev = entry;
+ break;
+ }
+ }
+
+ read_unlock_irqrestore(&devices_lock, flags);
+
+ return dev;
+}
+
+static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
+{
+ struct sk_buff *skb_cp;
+ int ret;
+
+ skb_cp = skb_copy(skb, GFP_ATOMIC);
+ if (!skb_cp)
+ return -ENOMEM;
+
+ ret = netif_rx(skb_cp);
+
+ BT_DBG("receive skb %d", ret);
+ if (ret < 0)
+ return NET_RX_DROP;
+
+ return ret;
+}
+
+static int process_data(struct sk_buff *skb, struct net_device *netdev,
+ struct l2cap_conn *conn)
+{
+ const u8 *saddr, *daddr;
+ u8 iphc0, iphc1;
+ struct lowpan_dev *dev;
+ struct lowpan_peer *peer;
+ unsigned long flags;
+
+ dev = lowpan_dev(netdev);
+
+ read_lock_irqsave(&devices_lock, flags);
+ peer = peer_lookup_conn(dev, conn);
+ read_unlock_irqrestore(&devices_lock, flags);
+ if (!peer)
+ goto drop;
+
+ saddr = peer->eui64_addr;
+ daddr = dev->netdev->dev_addr;
+
+ /* at least two bytes will be used for the encoding */
+ if (skb->len < 2)
+ goto drop;
+
+ if (lowpan_fetch_skb_u8(skb, &iphc0))
+ goto drop;
+
+ if (lowpan_fetch_skb_u8(skb, &iphc1))
+ goto drop;
+
+ return lowpan_process_data(skb, netdev,
+ saddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
+ daddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
+ iphc0, iphc1, give_skb_to_upper);
+
+drop:
+ kfree_skb(skb);
+ return -EINVAL;
+}
+
+static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
+ struct l2cap_conn *conn)
+{
+ struct sk_buff *local_skb;
+ int ret;
+
+ if (!netif_running(dev))
+ goto drop;
+
+ if (dev->type != ARPHRD_6LOWPAN)
+ goto drop;
+
+ /* check that it's our buffer */
+ if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
+ /* Copy the packet so that the IPv6 header is
+ * properly aligned.
+ */
+ local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
+ skb_tailroom(skb), GFP_ATOMIC);
+ if (!local_skb)
+ goto drop;
+
+ local_skb->protocol = htons(ETH_P_IPV6);
+ local_skb->pkt_type = PACKET_HOST;
+
+ skb_reset_network_header(local_skb);
+ skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));
+
+ if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) {
+ kfree_skb(local_skb);
+ goto drop;
+ }
+
+ dev->stats.rx_bytes += skb->len;
+ dev->stats.rx_packets++;
+
+ kfree_skb(local_skb);
+ kfree_skb(skb);
+ } else {
+ switch (skb->data[0] & 0xe0) {
+ case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */
+ local_skb = skb_clone(skb, GFP_ATOMIC);
+ if (!local_skb)
+ goto drop;
+
+ ret = process_data(local_skb, dev, conn);
+ if (ret != NET_RX_SUCCESS)
+ goto drop;
+
+ dev->stats.rx_bytes += skb->len;
+ dev->stats.rx_packets++;
+
+ kfree_skb(skb);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return NET_RX_SUCCESS;
+
+drop:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
+/* Packet from BT LE device */
+int bt_6lowpan_recv(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+ struct lowpan_dev *dev;
+ struct lowpan_peer *peer;
+ int err;
+
+ peer = lookup_peer(conn);
+ if (!peer)
+ return -ENOENT;
+
+ dev = lookup_dev(conn);
+ if (!dev || !dev->netdev)
+ return -ENOENT;
+
+ err = recv_pkt(skb, dev->netdev, conn);
+ BT_DBG("recv pkt %d", err);
+
+ return err;
+}
+
+static inline int skbuff_copy(void *msg, int len, int count, int mtu,
+ struct sk_buff *skb, struct net_device *dev)
+{
+ struct sk_buff **frag;
+ int sent = 0;
+
+ memcpy(skb_put(skb, count), msg, count);
+
+ sent += count;
+ msg += count;
+ len -= count;
+
+ dev->stats.tx_bytes += count;
+ dev->stats.tx_packets++;
+
+ raw_dump_table(__func__, "Sending", skb->data, skb->len);
+
+ /* Continuation fragments (no L2CAP header) */
+ frag = &skb_shinfo(skb)->frag_list;
+ while (len > 0) {
+ struct sk_buff *tmp;
+
+ count = min_t(unsigned int, mtu, len);
+
+ tmp = bt_skb_alloc(count, GFP_ATOMIC);
+ if (!tmp)
+ return -ENOMEM;
+
+ *frag = tmp;
+
+ memcpy(skb_put(*frag, count), msg, count);
+
+ raw_dump_table(__func__, "Sending fragment",
+ (*frag)->data, count);
+
+ (*frag)->priority = skb->priority;
+
+ sent += count;
+ msg += count;
+ len -= count;
+
+ skb->len += (*frag)->len;
+ skb->data_len += (*frag)->len;
+
+ frag = &(*frag)->next;
+
+ dev->stats.tx_bytes += count;
+ dev->stats.tx_packets++;
+ }
+
+ return sent;
+}
+
+static struct sk_buff *create_pdu(struct l2cap_conn *conn, void *msg,
+ size_t len, u32 priority,
+ struct net_device *dev)
+{
+ struct sk_buff *skb;
+ int err, count;
+ struct l2cap_hdr *lh;
+
+ /* FIXME: This mtu check should not be needed and at the moment is
+ * only used for testing purposes
+ */
+ if (conn->mtu > (L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE))
+ conn->mtu = L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE;
+
+ count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
+
+ BT_DBG("conn %p len %zu mtu %d count %d", conn, len, conn->mtu, count);
+
+ skb = bt_skb_alloc(count + L2CAP_HDR_SIZE, GFP_ATOMIC);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ skb->priority = priority;
+
+ lh = (struct l2cap_hdr *)skb_put(skb, L2CAP_HDR_SIZE);
+ lh->cid = cpu_to_le16(L2CAP_FC_6LOWPAN);
+ lh->len = cpu_to_le16(len);
+
+ err = skbuff_copy(msg, len, count, conn->mtu, skb, dev);
+ if (unlikely(err < 0)) {
+ kfree_skb(skb);
+ BT_DBG("skbuff copy %d failed", err);
+ return ERR_PTR(err);
+ }
+
+ return skb;
+}
+
+static int conn_send(struct l2cap_conn *conn,
+ void *msg, size_t len, u32 priority,
+ struct net_device *dev)
+{
+ struct sk_buff *skb;
+
+ skb = create_pdu(conn, msg, len, priority, dev);
+ if (IS_ERR(skb))
+ return -EINVAL;
+
+ BT_DBG("conn %p skb %p len %d priority %u", conn, skb, skb->len,
+ skb->priority);
+
+ hci_send_acl(conn->hchan, skb, ACL_START);
+
+ return 0;
+}
+
+static u8 get_addr_type_from_eui64(u8 byte)
+{
+ /* Universal (0) or local (1) bit: local means a random address */
+ if (byte & 0x02)
+ return ADDR_LE_DEV_RANDOM;
+
+ return ADDR_LE_DEV_PUBLIC;
+}
+
+static void copy_to_bdaddr(struct in6_addr *ip6_daddr, bdaddr_t *addr)
+{
+ u8 *eui64 = ip6_daddr->s6_addr + 8;
+
+ addr->b[0] = eui64[7];
+ addr->b[1] = eui64[6];
+ addr->b[2] = eui64[5];
+ addr->b[3] = eui64[2];
+ addr->b[4] = eui64[1];
+ addr->b[5] = eui64[0];
+}
+
+static void convert_dest_bdaddr(struct in6_addr *ip6_daddr,
+ bdaddr_t *addr, u8 *addr_type)
+{
+ copy_to_bdaddr(ip6_daddr, addr);
+
+ /* We need to toggle the U/L bit that we got from the IPv6 address
+ * so that we get the proper address and type of the BD address.
+ */
+ addr->b[5] ^= 0x02;
+
+ *addr_type = get_addr_type_from_eui64(addr->b[5]);
+}
+
+static int header_create(struct sk_buff *skb, struct net_device *netdev,
+ unsigned short type, const void *_daddr,
+ const void *_saddr, unsigned int len)
+{
+ struct ipv6hdr *hdr;
+ struct lowpan_dev *dev;
+ struct lowpan_peer *peer;
+ bdaddr_t addr, *any = BDADDR_ANY;
+ u8 *saddr, *daddr = any->b;
+ u8 addr_type;
+
+ if (type != ETH_P_IPV6)
+ return -EINVAL;
+
+ hdr = ipv6_hdr(skb);
+
+ dev = lowpan_dev(netdev);
+
+ if (ipv6_addr_is_multicast(&hdr->daddr)) {
+ memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
+ sizeof(struct in6_addr));
+ lowpan_cb(skb)->conn = NULL;
+ } else {
+ unsigned long flags;
+
+ /* Get destination BT device from skb.
+ * If there is no such peer then discard the packet.
+ */
+ convert_dest_bdaddr(&hdr->daddr, &addr, &addr_type);
+
+ BT_DBG("dest addr %pMR type %s IP %pI6c", &addr,
+ addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM",
+ &hdr->daddr);
+
+ read_lock_irqsave(&devices_lock, flags);
+ peer = peer_lookup_ba(dev, &addr, addr_type);
+ read_unlock_irqrestore(&devices_lock, flags);
+
+ if (!peer) {
+ BT_DBG("no such peer %pMR found", &addr);
+ return -ENOENT;
+ }
+
+ daddr = peer->eui64_addr;
+
+ memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
+ sizeof(struct in6_addr));
+ lowpan_cb(skb)->conn = peer->conn;
+ }
+
+ saddr = dev->netdev->dev_addr;
+
+ return lowpan_header_compress(skb, netdev, type, daddr, saddr, len);
+}
+
+/* Packet to BT LE device */
+static int send_pkt(struct l2cap_conn *conn, const void *saddr,
+ const void *daddr, struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ raw_dump_table(__func__, "raw skb data dump before fragmentation",
+ skb->data, skb->len);
+
+ return conn_send(conn, skb->data, skb->len, 0, netdev);
+}
+
+static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct sk_buff *local_skb;
+ struct lowpan_dev *entry, *tmp;
+ unsigned long flags;
+
+ read_lock_irqsave(&devices_lock, flags);
+
+ list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
+ struct lowpan_peer *pentry, *ptmp;
+ struct lowpan_dev *dev;
+
+ if (entry->netdev != netdev)
+ continue;
+
+ dev = lowpan_dev(entry->netdev);
+
+ list_for_each_entry_safe(pentry, ptmp, &dev->peers, list) {
+ local_skb = skb_clone(skb, GFP_ATOMIC);
+
+ send_pkt(pentry->conn, netdev->dev_addr,
+ pentry->eui64_addr, local_skb, netdev);
+
+ kfree_skb(local_skb);
+ }
+ }
+
+ read_unlock_irqrestore(&devices_lock, flags);
+}
+
+static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ int err = 0;
+ unsigned char *eui64_addr;
+ struct lowpan_dev *dev;
+ struct lowpan_peer *peer;
+ bdaddr_t addr;
+ u8 addr_type;
+
+ if (ipv6_addr_is_multicast(&lowpan_cb(skb)->addr)) {
+ /* We need to send the packet to every device
+ * behind this interface.
+ */
+ send_mcast_pkt(skb, netdev);
+ } else {
+ unsigned long flags;
+
+ convert_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type);
+ eui64_addr = lowpan_cb(skb)->addr.s6_addr + 8;
+ dev = lowpan_dev(netdev);
+
+ read_lock_irqsave(&devices_lock, flags);
+ peer = peer_lookup_ba(dev, &addr, addr_type);
+ read_unlock_irqrestore(&devices_lock, flags);
+
+ BT_DBG("xmit %s to %pMR type %s IP %pI6c peer %p",
+ netdev->name, &addr,
+ addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM",
+ &lowpan_cb(skb)->addr, peer);
+
+ if (peer && peer->conn)
+ err = send_pkt(peer->conn, netdev->dev_addr,
+ eui64_addr, skb, netdev);
+ }
+ dev_kfree_skb(skb);
+
+ if (err)
+ BT_DBG("ERROR: xmit failed (%d)", err);
+
+ return (err < 0) ? NET_XMIT_DROP : err;
+}
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_start_xmit = bt_xmit,
+};
+
+static struct header_ops header_ops = {
+ .create = header_create,
+};
+
+static void netdev_setup(struct net_device *dev)
+{
+ dev->addr_len = EUI64_ADDR_LEN;
+ dev->type = ARPHRD_6LOWPAN;
+
+ dev->hard_header_len = 0;
+ dev->needed_tailroom = 0;
+ dev->mtu = IPV6_MIN_MTU;
+ dev->tx_queue_len = 0;
+ dev->flags = IFF_RUNNING | IFF_POINTOPOINT;
+ dev->watchdog_timeo = 0;
+
+ dev->netdev_ops = &netdev_ops;
+ dev->header_ops = &header_ops;
+ dev->destructor = free_netdev;
+}
+
+static struct device_type bt_type = {
+ .name = "bluetooth",
+};
+
+static void set_addr(u8 *eui, u8 *addr, u8 addr_type)
+{
+ /* addr is the BT address in little-endian format */
+ eui[0] = addr[5];
+ eui[1] = addr[4];
+ eui[2] = addr[3];
+ eui[3] = 0xFF;
+ eui[4] = 0xFE;
+ eui[5] = addr[2];
+ eui[6] = addr[1];
+ eui[7] = addr[0];
+
+ /* Set or clear the universal/local bit, BT 6lowpan draft ch. 3.2.1 */
+ if (addr_type == ADDR_LE_DEV_PUBLIC)
+ eui[0] &= ~0x02;
+ else
+ eui[0] |= 0x02;
+
+ BT_DBG("type %d addr %*phC", addr_type, 8, eui);
+}
+
+static void set_dev_addr(struct net_device *netdev, bdaddr_t *addr,
+ u8 addr_type)
+{
+ netdev->addr_assign_type = NET_ADDR_PERM;
+ set_addr(netdev->dev_addr, addr->b, addr_type);
+}
+
+static void ifup(struct net_device *netdev)
+{
+ int err;
+
+ rtnl_lock();
+ err = dev_open(netdev);
+ if (err < 0)
+ BT_INFO("iface %s cannot be opened (%d)", netdev->name, err);
+ rtnl_unlock();
+}
+
+static void do_notify_peers(struct work_struct *work)
+{
+ struct lowpan_dev *dev = container_of(work, struct lowpan_dev,
+ notify_peers.work);
+
+ netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */
+}
+
+static bool is_bt_6lowpan(struct hci_conn *hcon)
+{
+ if (hcon->type != LE_LINK)
+ return false;
+
+ return test_bit(HCI_CONN_6LOWPAN, &hcon->flags);
+}
+
+static int add_peer_conn(struct l2cap_conn *conn, struct lowpan_dev *dev)
+{
+ struct lowpan_peer *peer;
+ unsigned long flags;
+
+ peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
+ if (!peer)
+ return -ENOMEM;
+
+ peer->conn = conn;
+ memset(&peer->peer_addr, 0, sizeof(struct in6_addr));
+
+ /* RFC 2464 ch. 5 */
+ peer->peer_addr.s6_addr[0] = 0xFE;
+ peer->peer_addr.s6_addr[1] = 0x80;
+ set_addr((u8 *)&peer->peer_addr.s6_addr + 8, conn->hcon->dst.b,
+ conn->hcon->dst_type);
+
+ memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8,
+ EUI64_ADDR_LEN);
+
+ write_lock_irqsave(&devices_lock, flags);
+ INIT_LIST_HEAD(&peer->list);
+ peer_add(dev, peer);
+ write_unlock_irqrestore(&devices_lock, flags);
+
+ /* Notifying peers about us needs to be done without locks held */
+ INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
+ schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100));
+
+ return 0;
+}
+
+/* This gets called when a BT LE 6LoWPAN device is connected. We then
+ * create a network device that acts as a proxy between the BT LE
+ * device and the kernel network stack.
+ */
+int bt_6lowpan_add_conn(struct l2cap_conn *conn)
+{
+ struct lowpan_peer *peer = NULL;
+ struct lowpan_dev *dev;
+ struct net_device *netdev;
+ int err = 0;
+ unsigned long flags;
+
+ if (!is_bt_6lowpan(conn->hcon))
+ return 0;
+
+ peer = lookup_peer(conn);
+ if (peer)
+ return -EEXIST;
+
+ dev = lookup_dev(conn);
+ if (dev)
+ return add_peer_conn(conn, dev);
+
+ netdev = alloc_netdev(sizeof(*dev), IFACE_NAME_TEMPLATE, netdev_setup);
+ if (!netdev)
+ return -ENOMEM;
+
+ set_dev_addr(netdev, &conn->hcon->src, conn->hcon->src_type);
+
+ netdev->netdev_ops = &netdev_ops;
+ SET_NETDEV_DEV(netdev, &conn->hcon->dev);
+ SET_NETDEV_DEVTYPE(netdev, &bt_type);
+
+ err = register_netdev(netdev);
+ if (err < 0) {
+ BT_INFO("register_netdev failed %d", err);
+ free_netdev(netdev);
+ goto out;
+ }
+
+ BT_DBG("ifindex %d peer bdaddr %pMR my addr %pMR",
+ netdev->ifindex, &conn->hcon->dst, &conn->hcon->src);
+ set_bit(__LINK_STATE_PRESENT, &netdev->state);
+
+ dev = netdev_priv(netdev);
+ dev->netdev = netdev;
+ dev->hdev = conn->hcon->hdev;
+ INIT_LIST_HEAD(&dev->peers);
+
+ write_lock_irqsave(&devices_lock, flags);
+ INIT_LIST_HEAD(&dev->list);
+ list_add(&dev->list, &bt_6lowpan_devices);
+ write_unlock_irqrestore(&devices_lock, flags);
+
+ ifup(netdev);
+
+ return add_peer_conn(conn, dev);
+
+out:
+ return err;
+}
+
+static void delete_netdev(struct work_struct *work)
+{
+ struct lowpan_dev *entry = container_of(work, struct lowpan_dev,
+ delete_netdev);
+
+ unregister_netdev(entry->netdev);
+
+ /* The entry pointer is deleted in device_event() */
+}
+
+int bt_6lowpan_del_conn(struct l2cap_conn *conn)
+{
+ struct lowpan_dev *entry, *tmp;
+ struct lowpan_dev *dev = NULL;
+ struct lowpan_peer *peer;
+ int err = -ENOENT;
+ unsigned long flags;
+ bool last = false;
+
+ if (!conn || !is_bt_6lowpan(conn->hcon))
+ return 0;
+
+ write_lock_irqsave(&devices_lock, flags);
+
+ list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
+ dev = lowpan_dev(entry->netdev);
+ peer = peer_lookup_conn(dev, conn);
+ if (peer) {
+ last = peer_del(dev, peer);
+ err = 0;
+ break;
+ }
+ }
+
+ if (!err && last && dev && !atomic_read(&dev->peer_count)) {
+ write_unlock_irqrestore(&devices_lock, flags);
+
+ cancel_delayed_work_sync(&dev->notify_peers);
+
+ /* bt_6lowpan_del_conn() is called with the hci dev lock held,
+ * which means that we must delete the netdevice in a worker
+ * thread.
+ */
+ INIT_WORK(&entry->delete_netdev, delete_netdev);
+ schedule_work(&entry->delete_netdev);
+ } else {
+ write_unlock_irqrestore(&devices_lock, flags);
+ }
+
+ return err;
+}
+
+static int device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+ struct lowpan_dev *entry, *tmp;
+ unsigned long flags;
+
+ if (netdev->type != ARPHRD_6LOWPAN)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_UNREGISTER:
+ write_lock_irqsave(&devices_lock, flags);
+ list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices,
+ list) {
+ if (entry->netdev == netdev) {
+ list_del(&entry->list);
+ kfree(entry);
+ break;
+ }
+ }
+ write_unlock_irqrestore(&devices_lock, flags);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block bt_6lowpan_dev_notifier = {
+ .notifier_call = device_event,
+};
+
+int bt_6lowpan_init(void)
+{
+ return register_netdevice_notifier(&bt_6lowpan_dev_notifier);
+}
+
+void bt_6lowpan_cleanup(void)
+{
+ unregister_netdevice_notifier(&bt_6lowpan_dev_notifier);
+}
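
As an aside on the address handling above: set_addr() turns the little-endian BD address into a modified EUI-64 interface identifier (ff:fe inserted in the middle, universal/local bit adjusted per the BT 6lowpan draft), and add_peer_conn() prefixes it with fe80:: to form the peer's link-local IPv6 address. Below is a minimal standalone sketch of that forward mapping; the peer address is hypothetical, and plain C types stand in for the kernel's bdaddr_t and helpers.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define TYPE_PUBLIC 0   /* stands in for ADDR_LE_DEV_PUBLIC */
#define TYPE_RANDOM 1   /* stands in for ADDR_LE_DEV_RANDOM */

/* Mirrors set_addr() above: BD address (little-endian) -> EUI-64 */
static void bd_to_eui64(uint8_t *eui, const uint8_t *addr, int addr_type)
{
    eui[0] = addr[5];
    eui[1] = addr[4];
    eui[2] = addr[3];
    eui[3] = 0xFF;
    eui[4] = 0xFE;
    eui[5] = addr[2];
    eui[6] = addr[1];
    eui[7] = addr[0];

    /* universal/local bit handling as in the patch */
    if (addr_type == TYPE_PUBLIC)
        eui[0] &= ~0x02;
    else
        eui[0] |= 0x02;
}

int main(void)
{
    /* hypothetical public peer 00:1b:dc:aa:bb:cc, stored
     * little-endian as in bdaddr_t */
    uint8_t bd[6] = { 0xcc, 0xbb, 0xaa, 0xdc, 0x1b, 0x00 };
    uint8_t ip6[16];
    int i;

    memset(ip6, 0, sizeof(ip6));
    ip6[0] = 0xfe;  /* link-local prefix, RFC 2464 ch. 5 */
    ip6[1] = 0x80;
    bd_to_eui64(ip6 + 8, bd, TYPE_PUBLIC);

    /* prints fe80:0000:0000:0000:001b:dcff:feaa:bbcc */
    for (i = 0; i < 16; i += 2)
        printf("%02x%02x%s", ip6[i], ip6[i + 1], i == 14 ? "\n" : ":");
    return 0;
}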
diff --git a/net/bluetooth/6lowpan.h b/net/bluetooth/6lowpan.h
new file mode 100644
index 00000000000..5d281f1eaf5
--- /dev/null
+++ b/net/bluetooth/6lowpan.h
@@ -0,0 +1,47 @@
+/*
+ Copyright (c) 2013 Intel Corp.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 and
+ only version 2 as published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+*/
+
+#ifndef __6LOWPAN_H
+#define __6LOWPAN_H
+
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <net/bluetooth/l2cap.h>
+
+#if IS_ENABLED(CONFIG_BT_6LOWPAN)
+int bt_6lowpan_recv(struct l2cap_conn *conn, struct sk_buff *skb);
+int bt_6lowpan_add_conn(struct l2cap_conn *conn);
+int bt_6lowpan_del_conn(struct l2cap_conn *conn);
+int bt_6lowpan_init(void);
+void bt_6lowpan_cleanup(void);
+#else
+static inline int bt_6lowpan_recv(struct l2cap_conn *conn,
+ struct sk_buff *skb)
+{
+ return -EOPNOTSUPP;
+}
+static inline int bt_6lowpan_add_conn(struct l2cap_conn *conn)
+{
+ return -EOPNOTSUPP;
+}
+static inline int bt_6lowpan_del_conn(struct l2cap_conn *conn)
+{
+ return -EOPNOTSUPP;
+}
+static inline int bt_6lowpan_init(void)
+{
+ return -EOPNOTSUPP;
+}
+static inline void bt_6lowpan_cleanup(void) { }
+#endif
+
+#endif /* __6LOWPAN_H */
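
The #else branch above supplies static inline no-op stubs so that callers (e.g. the L2CAP core) can invoke the 6LoWPAN hooks unconditionally, without #ifdefs of their own; static inline gives every includer a warning-free copy and avoids duplicate symbols. A reduced standalone sketch of the same pattern, with FEATURE_ON standing in for IS_ENABLED(CONFIG_BT_6LOWPAN):

#include <stdio.h>
#include <errno.h>

#define FEATURE_ON 0    /* flip to 1 to use the real implementation */

#if FEATURE_ON
int feature_recv(const char *pkt)
{
    printf("handling %s\n", pkt);
    return 0;
}
#else
/* static inline: no "defined but not used" warnings, no duplicate
 * definitions across translation units */
static inline int feature_recv(const char *pkt)
{
    (void)pkt;
    return -EOPNOTSUPP;
}
#endif

int main(void)
{
    /* the caller needs no #ifdef of its own */
    int err = feature_recv("ipv6-datagram");

    if (err)
        printf("feature disabled: %d\n", err);
    return 0;
}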
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index ed371684c13..06ec14499ca 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -6,6 +6,13 @@ menuconfig BT
tristate "Bluetooth subsystem support"
depends on NET && !S390
depends on RFKILL || !RFKILL
+ select 6LOWPAN_IPHC if BT_6LOWPAN
+ select CRC16
+ select CRYPTO
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_AES
+ select CRYPTO_ECB
+ select CRYPTO_SHA256
help
Bluetooth is a low-cost, low-power, short-range wireless technology.
It was designed as a replacement for cables and other short-range
@@ -14,10 +21,12 @@ menuconfig BT
Bluetooth can be found at <http://www.bluetooth.com/>.
The Linux Bluetooth subsystem consists of several layers:
- Bluetooth Core (HCI device and connection manager, scheduler)
+ Bluetooth Core
+ HCI device and connection manager, scheduler
+ SCO audio links
+ L2CAP (Logical Link Control and Adaptation Protocol)
+ SMP (Security Manager Protocol) on LE (Low Energy) links
HCI Device drivers (Interface to the hardware)
- SCO Module (SCO audio links)
- L2CAP Module (Logical Link Control and Adaptation Protocol)
RFCOMM Module (RFCOMM Protocol)
BNEP Module (Bluetooth Network Encapsulation Protocol)
CMTP Module (CAPI Message Transport Protocol)
@@ -27,31 +36,15 @@ menuconfig BT
compile it as module (bluetooth).
To use Linux Bluetooth subsystem, you will need several user-space
- utilities like hciconfig and hcid. These utilities and updates to
- Bluetooth kernel modules are provided in the BlueZ packages.
- For more information, see <http://www.bluez.org/>.
+ utilities like hciconfig and bluetoothd. These utilities and updates
+ to Bluetooth kernel modules are provided in the BlueZ packages. For
+ more information, see <http://www.bluez.org/>.
-config BT_L2CAP
- tristate "L2CAP protocol support"
- depends on BT
- select CRC16
+config BT_6LOWPAN
+ bool "Bluetooth 6LoWPAN support"
+ depends on BT && IPV6
help
- L2CAP (Logical Link Control and Adaptation Protocol) provides
- connection oriented and connection-less data transport. L2CAP
- support is required for most Bluetooth applications.
-
- Say Y here to compile L2CAP support into the kernel or say M to
- compile it as module (l2cap).
-
-config BT_SCO
- tristate "SCO links support"
- depends on BT
- help
- SCO link provides voice transport over Bluetooth. SCO support is
- required for voice applications like Headset and Audio.
-
- Say Y here to compile SCO support into the kernel or say M to
- compile it as module (sco).
+ IPv6 compression over Bluetooth Low Energy links.
source "net/bluetooth/rfcomm/Kconfig"
@@ -62,4 +55,3 @@ source "net/bluetooth/cmtp/Kconfig"
source "net/bluetooth/hidp/Kconfig"
source "drivers/bluetooth/Kconfig"
-
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 250f954f021..ca51246b101 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -3,11 +3,14 @@
#
obj-$(CONFIG_BT) += bluetooth.o
-obj-$(CONFIG_BT_L2CAP) += l2cap.o
-obj-$(CONFIG_BT_SCO) += sco.o
obj-$(CONFIG_BT_RFCOMM) += rfcomm/
obj-$(CONFIG_BT_BNEP) += bnep/
obj-$(CONFIG_BT_CMTP) += cmtp/
obj-$(CONFIG_BT_HIDP) += hidp/
-bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o
+bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
+ hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \
+ a2mp.o amp.o
+bluetooth-$(CONFIG_BT_6LOWPAN) += 6lowpan.o
+
+subdir-ccflags-y += -D__CHECK_ENDIAN__
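
The new -D__CHECK_ENDIAN__ define makes sparse treat __le16/__le32 as types distinct from plain integers, so any wire field used without an explicit conversion gets flagged. The standalone sketch below shows the discipline being enforced, with plain C stand-ins for the kernel's __le16 and conversion helpers; the header layout mirrors struct l2cap_hdr (len, then cid) and the values are illustrative.

#include <stdio.h>
#include <stdint.h>

typedef uint16_t le16;  /* kernel: __le16, a sparse-tagged type */

/* identity on little-endian hosts, byte swap on big-endian ones */
static uint16_t swab_if_be(uint16_t v)
{
    union { uint16_t u; uint8_t b[2]; } probe = { .u = 1 };

    return probe.b[0] ? v : (uint16_t)(v << 8 | v >> 8);
}
#define cpu_to_le16(v) ((le16)swab_if_be(v))
#define le16_to_cpu(v) swab_if_be(v)

/* same shape as struct l2cap_hdr: length first, then channel id */
struct hdr {
    le16 len;
    le16 cid;
} __attribute__((packed));

int main(void)
{
    struct hdr h;

    h.len = cpu_to_le16(23);        /* host value -> wire field */
    h.cid = cpu_to_le16(0x003f);    /* illustrative channel id */

    printf("len %u cid 0x%04x\n", le16_to_cpu(h.len), le16_to_cpu(h.cid));
    return 0;
}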
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
new file mode 100644
index 00000000000..9514cc9e850
--- /dev/null
+++ b/net/bluetooth/a2mp.c
@@ -0,0 +1,1023 @@
+/*
+ Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved.
+ Copyright (c) 2011,2012 Intel Corp.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 and
+ only version 2 as published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+*/
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/l2cap.h>
+
+#include "a2mp.h"
+#include "amp.h"
+
+/* Global AMP Manager list */
+LIST_HEAD(amp_mgr_list);
+DEFINE_MUTEX(amp_mgr_list_lock);
+
+/* A2MP build & send command helper functions */
+static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data)
+{
+ struct a2mp_cmd *cmd;
+ int plen;
+
+ plen = sizeof(*cmd) + len;
+ cmd = kzalloc(plen, GFP_KERNEL);
+ if (!cmd)
+ return NULL;
+
+ cmd->code = code;
+ cmd->ident = ident;
+ cmd->len = cpu_to_le16(len);
+
+ memcpy(cmd->data, data, len);
+
+ return cmd;
+}
+
+void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data)
+{
+ struct l2cap_chan *chan = mgr->a2mp_chan;
+ struct a2mp_cmd *cmd;
+ u16 total_len = len + sizeof(*cmd);
+ struct kvec iv;
+ struct msghdr msg;
+
+ cmd = __a2mp_build(code, ident, len, data);
+ if (!cmd)
+ return;
+
+ iv.iov_base = cmd;
+ iv.iov_len = total_len;
+
+ memset(&msg, 0, sizeof(msg));
+
+ msg.msg_iov = (struct iovec *) &iv;
+ msg.msg_iovlen = 1;
+
+ l2cap_chan_send(chan, &msg, total_len, 0);
+
+ kfree(cmd);
+}
+
+u8 __next_ident(struct amp_mgr *mgr)
+{
+ if (++mgr->ident == 0)
+ mgr->ident = 1;
+
+ return mgr->ident;
+}
+
+/* hci_dev_list shall be locked */
+static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl)
+{
+ struct hci_dev *hdev;
+ int i = 1;
+
+ cl[0].id = AMP_ID_BREDR;
+ cl[0].type = AMP_TYPE_BREDR;
+ cl[0].status = AMP_STATUS_BLUETOOTH_ONLY;
+
+ list_for_each_entry(hdev, &hci_dev_list, list) {
+ if (hdev->dev_type == HCI_AMP) {
+ cl[i].id = hdev->id;
+ cl[i].type = hdev->amp_type;
+ if (test_bit(HCI_UP, &hdev->flags))
+ cl[i].status = hdev->amp_status;
+ else
+ cl[i].status = AMP_STATUS_POWERED_DOWN;
+ i++;
+ }
+ }
+}
+
+/* Processing A2MP messages */
+static int a2mp_command_rej(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_cmd_rej *rej = (void *) skb->data;
+
+ if (le16_to_cpu(hdr->len) < sizeof(*rej))
+ return -EINVAL;
+
+ BT_DBG("ident %d reason %d", hdr->ident, le16_to_cpu(rej->reason));
+
+ skb_pull(skb, sizeof(*rej));
+
+ return 0;
+}
+
+static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_discov_req *req = (void *) skb->data;
+ u16 len = le16_to_cpu(hdr->len);
+ struct a2mp_discov_rsp *rsp;
+ u16 ext_feat;
+ u8 num_ctrl;
+ struct hci_dev *hdev;
+
+ if (len < sizeof(*req))
+ return -EINVAL;
+
+ skb_pull(skb, sizeof(*req));
+
+ ext_feat = le16_to_cpu(req->ext_feat);
+
+ BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu), ext_feat);
+
+ /* for now, just check that the packet is not broken */
+ while (ext_feat & A2MP_FEAT_EXT) {
+ if (len < sizeof(ext_feat))
+ return -EINVAL;
+
+ ext_feat = get_unaligned_le16(skb->data);
+ BT_DBG("efm 0x%4.4x", ext_feat);
+ len -= sizeof(ext_feat);
+ skb_pull(skb, sizeof(ext_feat));
+ }
+
+ read_lock(&hci_dev_list_lock);
+
+ /* at minimum the BR/EDR controller needs to be listed */
+ num_ctrl = 1;
+
+ list_for_each_entry(hdev, &hci_dev_list, list) {
+ if (hdev->dev_type == HCI_AMP)
+ num_ctrl++;
+ }
+
+ len = num_ctrl * sizeof(struct a2mp_cl) + sizeof(*rsp);
+ rsp = kmalloc(len, GFP_ATOMIC);
+ if (!rsp) {
+ read_unlock(&hci_dev_list_lock);
+ return -ENOMEM;
+ }
+
+ rsp->mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
+ rsp->ext_feat = 0;
+
+ __a2mp_add_cl(mgr, rsp->cl);
+
+ read_unlock(&hci_dev_list_lock);
+
+ a2mp_send(mgr, A2MP_DISCOVER_RSP, hdr->ident, len, rsp);
+
+ kfree(rsp);
+ return 0;
+}
+
+static int a2mp_discover_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_discov_rsp *rsp = (void *) skb->data;
+ u16 len = le16_to_cpu(hdr->len);
+ struct a2mp_cl *cl;
+ u16 ext_feat;
+ bool found = false;
+
+ if (len < sizeof(*rsp))
+ return -EINVAL;
+
+ len -= sizeof(*rsp);
+ skb_pull(skb, sizeof(*rsp));
+
+ ext_feat = le16_to_cpu(rsp->ext_feat);
+
+ BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(rsp->mtu), ext_feat);
+
+ /* for now, just check that the packet is not broken */
+ while (ext_feat & A2MP_FEAT_EXT) {
+ if (len < sizeof(ext_feat))
+ return -EINVAL;
+
+ ext_feat = get_unaligned_le16(skb->data);
+ BT_DBG("efm 0x%4.4x", ext_feat);
+ len -= sizeof(ext_feat);
+ skb_pull(skb, sizeof(ext_feat));
+ }
+
+ cl = (void *) skb->data;
+ while (len >= sizeof(*cl)) {
+ BT_DBG("Remote AMP id %d type %d status %d", cl->id, cl->type,
+ cl->status);
+
+ if (cl->id != AMP_ID_BREDR && cl->type != AMP_TYPE_BREDR) {
+ struct a2mp_info_req req;
+
+ found = true;
+ req.id = cl->id;
+ a2mp_send(mgr, A2MP_GETINFO_REQ, __next_ident(mgr),
+ sizeof(req), &req);
+ }
+
+ len -= sizeof(*cl);
+ cl = (void *) skb_pull(skb, sizeof(*cl));
+ }
+
+ /* Fall back to L2CAP init sequence */
+ if (!found) {
+ struct l2cap_conn *conn = mgr->l2cap_conn;
+ struct l2cap_chan *chan;
+
+ mutex_lock(&conn->chan_lock);
+
+ list_for_each_entry(chan, &conn->chan_l, list) {
+
+ BT_DBG("chan %p state %s", chan,
+ state_to_string(chan->state));
+
+ if (chan->scid == L2CAP_CID_A2MP)
+ continue;
+
+ l2cap_chan_lock(chan);
+
+ if (chan->state == BT_CONNECT)
+ l2cap_send_conn_req(chan);
+
+ l2cap_chan_unlock(chan);
+ }
+
+ mutex_unlock(&conn->chan_lock);
+ }
+
+ return 0;
+}
+
+static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_cl *cl = (void *) skb->data;
+
+ while (skb->len >= sizeof(*cl)) {
+ BT_DBG("Controller id %d type %d status %d", cl->id, cl->type,
+ cl->status);
+ cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl));
+ }
+
+ /* TODO send A2MP_CHANGE_RSP */
+
+ return 0;
+}
+
+static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_info_req *req = (void *) skb->data;
+ struct hci_dev *hdev;
+
+ if (le16_to_cpu(hdr->len) < sizeof(*req))
+ return -EINVAL;
+
+ BT_DBG("id %d", req->id);
+
+ hdev = hci_dev_get(req->id);
+ if (!hdev || hdev->dev_type != HCI_AMP) {
+ struct a2mp_info_rsp rsp;
+
+ rsp.id = req->id;
+ rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+
+ a2mp_send(mgr, A2MP_GETINFO_RSP, hdr->ident, sizeof(rsp),
+ &rsp);
+
+ goto done;
+ }
+
+ set_bit(READ_LOC_AMP_INFO, &mgr->state);
+ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
+
+done:
+ if (hdev)
+ hci_dev_put(hdev);
+
+ skb_pull(skb, sizeof(*req));
+ return 0;
+}
+
+static int a2mp_getinfo_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_info_rsp *rsp = (struct a2mp_info_rsp *) skb->data;
+ struct a2mp_amp_assoc_req req;
+ struct amp_ctrl *ctrl;
+
+ if (le16_to_cpu(hdr->len) < sizeof(*rsp))
+ return -EINVAL;
+
+ BT_DBG("id %d status 0x%2.2x", rsp->id, rsp->status);
+
+ if (rsp->status)
+ return -EINVAL;
+
+ ctrl = amp_ctrl_add(mgr, rsp->id);
+ if (!ctrl)
+ return -ENOMEM;
+
+ req.id = rsp->id;
+ a2mp_send(mgr, A2MP_GETAMPASSOC_REQ, __next_ident(mgr), sizeof(req),
+ &req);
+
+ skb_pull(skb, sizeof(*rsp));
+ return 0;
+}
+
+static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_amp_assoc_req *req = (void *) skb->data;
+ struct hci_dev *hdev;
+ struct amp_mgr *tmp;
+
+ if (le16_to_cpu(hdr->len) < sizeof(*req))
+ return -EINVAL;
+
+ BT_DBG("id %d", req->id);
+
+ /* Make sure that another request is not being processed */
+ tmp = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC);
+
+ hdev = hci_dev_get(req->id);
+ if (!hdev || hdev->amp_type == AMP_TYPE_BREDR || tmp) {
+ struct a2mp_amp_assoc_rsp rsp;
+ rsp.id = req->id;
+
+ if (tmp) {
+ rsp.status = A2MP_STATUS_COLLISION_OCCURED;
+ amp_mgr_put(tmp);
+ } else {
+ rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+ }
+
+ a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, hdr->ident, sizeof(rsp),
+ &rsp);
+
+ goto done;
+ }
+
+ amp_read_loc_assoc(hdev, mgr);
+
+done:
+ if (hdev)
+ hci_dev_put(hdev);
+
+ skb_pull(skb, sizeof(*req));
+ return 0;
+}
+
+static int a2mp_getampassoc_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_amp_assoc_rsp *rsp = (void *) skb->data;
+ u16 len = le16_to_cpu(hdr->len);
+ struct hci_dev *hdev;
+ struct amp_ctrl *ctrl;
+ struct hci_conn *hcon;
+ size_t assoc_len;
+
+ if (len < sizeof(*rsp))
+ return -EINVAL;
+
+ assoc_len = len - sizeof(*rsp);
+
+ BT_DBG("id %d status 0x%2.2x assoc len %zu", rsp->id, rsp->status,
+ assoc_len);
+
+ if (rsp->status)
+ return -EINVAL;
+
+ /* Save remote ASSOC data */
+ ctrl = amp_ctrl_lookup(mgr, rsp->id);
+ if (ctrl) {
+ u8 *assoc;
+
+ assoc = kmemdup(rsp->amp_assoc, assoc_len, GFP_KERNEL);
+ if (!assoc) {
+ amp_ctrl_put(ctrl);
+ return -ENOMEM;
+ }
+
+ ctrl->assoc = assoc;
+ ctrl->assoc_len = assoc_len;
+ ctrl->assoc_rem_len = assoc_len;
+ ctrl->assoc_len_so_far = 0;
+
+ amp_ctrl_put(ctrl);
+ }
+
+ /* Create Phys Link */
+ hdev = hci_dev_get(rsp->id);
+ if (!hdev)
+ return -EINVAL;
+
+ hcon = phylink_add(hdev, mgr, rsp->id, true);
+ if (!hcon)
+ goto done;
+
+ BT_DBG("Created hcon %p: loc:%d -> rem:%d", hcon, hdev->id, rsp->id);
+
+ mgr->bredr_chan->remote_amp_id = rsp->id;
+
+ amp_create_phylink(hdev, mgr, hcon);
+
+done:
+ hci_dev_put(hdev);
+ skb_pull(skb, len);
+ return 0;
+}
+
+static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_physlink_req *req = (void *) skb->data;
+
+ struct a2mp_physlink_rsp rsp;
+ struct hci_dev *hdev;
+ struct hci_conn *hcon;
+ struct amp_ctrl *ctrl;
+
+ if (le16_to_cpu(hdr->len) < sizeof(*req))
+ return -EINVAL;
+
+ BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id);
+
+ rsp.local_id = req->remote_id;
+ rsp.remote_id = req->local_id;
+
+ hdev = hci_dev_get(req->remote_id);
+ if (!hdev || hdev->amp_type == AMP_TYPE_BREDR) {
+ rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+ goto send_rsp;
+ }
+
+ ctrl = amp_ctrl_lookup(mgr, rsp.remote_id);
+ if (!ctrl) {
+ ctrl = amp_ctrl_add(mgr, rsp.remote_id);
+ if (ctrl) {
+ amp_ctrl_get(ctrl);
+ } else {
+ rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
+ goto send_rsp;
+ }
+ }
+
+ if (ctrl) {
+ size_t assoc_len = le16_to_cpu(hdr->len) - sizeof(*req);
+ u8 *assoc;
+
+ assoc = kmemdup(req->amp_assoc, assoc_len, GFP_KERNEL);
+ if (!assoc) {
+ amp_ctrl_put(ctrl);
+ return -ENOMEM;
+ }
+
+ ctrl->assoc = assoc;
+ ctrl->assoc_len = assoc_len;
+ ctrl->assoc_rem_len = assoc_len;
+ ctrl->assoc_len_so_far = 0;
+
+ amp_ctrl_put(ctrl);
+ }
+
+ hcon = phylink_add(hdev, mgr, req->local_id, false);
+ if (hcon) {
+ amp_accept_phylink(hdev, mgr, hcon);
+ rsp.status = A2MP_STATUS_SUCCESS;
+ } else {
+ rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
+ }
+
+send_rsp:
+ if (hdev)
+ hci_dev_put(hdev);
+
+ /* Reply with an error now; success is sent only after the HCI
+ * Write Remote AMP Assoc command completes with success status.
+ */
+ if (rsp.status != A2MP_STATUS_SUCCESS) {
+ a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, hdr->ident,
+ sizeof(rsp), &rsp);
+ } else {
+ set_bit(WRITE_REMOTE_AMP_ASSOC, &mgr->state);
+ mgr->ident = hdr->ident;
+ }
+
+ skb_pull(skb, le16_to_cpu(hdr->len));
+ return 0;
+}
+
+static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_physlink_req *req = (void *) skb->data;
+ struct a2mp_physlink_rsp rsp;
+ struct hci_dev *hdev;
+ struct hci_conn *hcon;
+
+ if (le16_to_cpu(hdr->len) < sizeof(*req))
+ return -EINVAL;
+
+ BT_DBG("local_id %d remote_id %d", req->local_id, req->remote_id);
+
+ rsp.local_id = req->remote_id;
+ rsp.remote_id = req->local_id;
+ rsp.status = A2MP_STATUS_SUCCESS;
+
+ hdev = hci_dev_get(req->remote_id);
+ if (!hdev) {
+ rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+ goto send_rsp;
+ }
+
+ hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
+ &mgr->l2cap_conn->hcon->dst);
+ if (!hcon) {
+ BT_ERR("No phys link exist");
+ rsp.status = A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS;
+ goto clean;
+ }
+
+ /* TODO Disconnect Phys Link here */
+
+clean:
+ hci_dev_put(hdev);
+
+send_rsp:
+ a2mp_send(mgr, A2MP_DISCONNPHYSLINK_RSP, hdr->ident, sizeof(rsp), &rsp);
+
+ skb_pull(skb, sizeof(*req));
+ return 0;
+}
+
+static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ BT_DBG("ident %d code 0x%2.2x", hdr->ident, hdr->code);
+
+ skb_pull(skb, le16_to_cpu(hdr->len));
+ return 0;
+}
+
+/* Handle A2MP signalling */
+static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
+{
+ struct a2mp_cmd *hdr;
+ struct amp_mgr *mgr = chan->data;
+ int err = 0;
+
+ amp_mgr_get(mgr);
+
+ while (skb->len >= sizeof(*hdr)) {
+ u16 len;
+
+ hdr = (void *) skb->data;
+ len = le16_to_cpu(hdr->len);
+
+ BT_DBG("code 0x%2.2x id %d len %u", hdr->code, hdr->ident, len);
+
+ skb_pull(skb, sizeof(*hdr));
+
+ if (len > skb->len || !hdr->ident) {
+ err = -EINVAL;
+ break;
+ }
+
+ mgr->ident = hdr->ident;
+
+ switch (hdr->code) {
+ case A2MP_COMMAND_REJ:
+ a2mp_command_rej(mgr, skb, hdr);
+ break;
+
+ case A2MP_DISCOVER_REQ:
+ err = a2mp_discover_req(mgr, skb, hdr);
+ break;
+
+ case A2MP_CHANGE_NOTIFY:
+ err = a2mp_change_notify(mgr, skb, hdr);
+ break;
+
+ case A2MP_GETINFO_REQ:
+ err = a2mp_getinfo_req(mgr, skb, hdr);
+ break;
+
+ case A2MP_GETAMPASSOC_REQ:
+ err = a2mp_getampassoc_req(mgr, skb, hdr);
+ break;
+
+ case A2MP_CREATEPHYSLINK_REQ:
+ err = a2mp_createphyslink_req(mgr, skb, hdr);
+ break;
+
+ case A2MP_DISCONNPHYSLINK_REQ:
+ err = a2mp_discphyslink_req(mgr, skb, hdr);
+ break;
+
+ case A2MP_DISCOVER_RSP:
+ err = a2mp_discover_rsp(mgr, skb, hdr);
+ break;
+
+ case A2MP_GETINFO_RSP:
+ err = a2mp_getinfo_rsp(mgr, skb, hdr);
+ break;
+
+ case A2MP_GETAMPASSOC_RSP:
+ err = a2mp_getampassoc_rsp(mgr, skb, hdr);
+ break;
+
+ case A2MP_CHANGE_RSP:
+ case A2MP_CREATEPHYSLINK_RSP:
+ case A2MP_DISCONNPHYSLINK_RSP:
+ err = a2mp_cmd_rsp(mgr, skb, hdr);
+ break;
+
+ default:
+ BT_ERR("Unknown A2MP sig cmd 0x%2.2x", hdr->code);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ if (err) {
+ struct a2mp_cmd_rej rej;
+
+ rej.reason = cpu_to_le16(0);
+ hdr = (void *) skb->data;
+
+ BT_DBG("Send A2MP Rej: cmd 0x%2.2x err %d", hdr->code, err);
+
+ a2mp_send(mgr, A2MP_COMMAND_REJ, hdr->ident, sizeof(rej),
+ &rej);
+ }
+
+ /* Always free the skb and return a success code to prevent
+ * sending an L2CAP Disconnect over the A2MP channel */
+ kfree_skb(skb);
+
+ amp_mgr_put(mgr);
+
+ return 0;
+}
+
+static void a2mp_chan_close_cb(struct l2cap_chan *chan)
+{
+ l2cap_chan_put(chan);
+}
+
+static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state,
+ int err)
+{
+ struct amp_mgr *mgr = chan->data;
+
+ if (!mgr)
+ return;
+
+ BT_DBG("chan %p state %s", chan, state_to_string(state));
+
+ chan->state = state;
+
+ switch (state) {
+ case BT_CLOSED:
+ if (mgr)
+ amp_mgr_put(mgr);
+ break;
+ }
+}
+
+static struct sk_buff *a2mp_chan_alloc_skb_cb(struct l2cap_chan *chan,
+ unsigned long len, int nb)
+{
+ struct sk_buff *skb;
+
+ skb = bt_skb_alloc(len, GFP_KERNEL);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ return skb;
+}
+
+static struct l2cap_ops a2mp_chan_ops = {
+ .name = "L2CAP A2MP channel",
+ .recv = a2mp_chan_recv_cb,
+ .close = a2mp_chan_close_cb,
+ .state_change = a2mp_chan_state_change_cb,
+ .alloc_skb = a2mp_chan_alloc_skb_cb,
+
+ /* Not implemented for A2MP */
+ .new_connection = l2cap_chan_no_new_connection,
+ .teardown = l2cap_chan_no_teardown,
+ .ready = l2cap_chan_no_ready,
+ .defer = l2cap_chan_no_defer,
+ .resume = l2cap_chan_no_resume,
+ .set_shutdown = l2cap_chan_no_set_shutdown,
+ .get_sndtimeo = l2cap_chan_no_get_sndtimeo,
+};
+
+static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn, bool locked)
+{
+ struct l2cap_chan *chan;
+ int err;
+
+ chan = l2cap_chan_create();
+ if (!chan)
+ return NULL;
+
+ BT_DBG("chan %p", chan);
+
+ chan->chan_type = L2CAP_CHAN_FIXED;
+ chan->scid = L2CAP_CID_A2MP;
+ chan->dcid = L2CAP_CID_A2MP;
+ chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
+ chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
+ chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
+
+ chan->ops = &a2mp_chan_ops;
+
+ l2cap_chan_set_defaults(chan);
+ chan->remote_max_tx = chan->max_tx;
+ chan->remote_tx_win = chan->tx_win;
+
+ chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
+ chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
+
+ skb_queue_head_init(&chan->tx_q);
+
+ chan->mode = L2CAP_MODE_ERTM;
+
+ err = l2cap_ertm_init(chan);
+ if (err < 0) {
+ l2cap_chan_del(chan, 0);
+ return NULL;
+ }
+
+ chan->conf_state = 0;
+
+ if (locked)
+ __l2cap_chan_add(conn, chan);
+ else
+ l2cap_chan_add(conn, chan);
+
+ chan->remote_mps = chan->omtu;
+ chan->mps = chan->omtu;
+
+ chan->state = BT_CONNECTED;
+
+ return chan;
+}
+
+/* AMP Manager functions */
+struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr)
+{
+ BT_DBG("mgr %p orig refcnt %d", mgr, atomic_read(&mgr->kref.refcount));
+
+ kref_get(&mgr->kref);
+
+ return mgr;
+}
+
+static void amp_mgr_destroy(struct kref *kref)
+{
+ struct amp_mgr *mgr = container_of(kref, struct amp_mgr, kref);
+
+ BT_DBG("mgr %p", mgr);
+
+ mutex_lock(&amp_mgr_list_lock);
+ list_del(&mgr->list);
+ mutex_unlock(&amp_mgr_list_lock);
+
+ amp_ctrl_list_flush(mgr);
+ kfree(mgr);
+}
+
+int amp_mgr_put(struct amp_mgr *mgr)
+{
+ BT_DBG("mgr %p orig refcnt %d", mgr, atomic_read(&mgr->kref.refcount));
+
+ return kref_put(&mgr->kref, &amp_mgr_destroy);
+}
+
+static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn, bool locked)
+{
+ struct amp_mgr *mgr;
+ struct l2cap_chan *chan;
+
+ mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return NULL;
+
+ BT_DBG("conn %p mgr %p", conn, mgr);
+
+ mgr->l2cap_conn = conn;
+
+ chan = a2mp_chan_open(conn, locked);
+ if (!chan) {
+ kfree(mgr);
+ return NULL;
+ }
+
+ mgr->a2mp_chan = chan;
+ chan->data = mgr;
+
+ conn->hcon->amp_mgr = mgr;
+
+ kref_init(&mgr->kref);
+
+ /* Remote AMP ctrl list initialization */
+ INIT_LIST_HEAD(&mgr->amp_ctrls);
+ mutex_init(&mgr->amp_ctrls_lock);
+
+ mutex_lock(&amp_mgr_list_lock);
+ list_add(&mgr->list, &amp_mgr_list);
+ mutex_unlock(&amp_mgr_list_lock);
+
+ return mgr;
+}
+
+struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
+ struct sk_buff *skb)
+{
+ struct amp_mgr *mgr;
+
+ if (conn->hcon->type != ACL_LINK)
+ return NULL;
+
+ mgr = amp_mgr_create(conn, false);
+ if (!mgr) {
+ BT_ERR("Could not create AMP manager");
+ return NULL;
+ }
+
+ BT_DBG("mgr: %p chan %p", mgr, mgr->a2mp_chan);
+
+ return mgr->a2mp_chan;
+}
+
+struct amp_mgr *amp_mgr_lookup_by_state(u8 state)
+{
+ struct amp_mgr *mgr;
+
+ mutex_lock(&amp_mgr_list_lock);
+ list_for_each_entry(mgr, &amp_mgr_list, list) {
+ if (test_and_clear_bit(state, &mgr->state)) {
+ amp_mgr_get(mgr);
+ mutex_unlock(&amp_mgr_list_lock);
+ return mgr;
+ }
+ }
+ mutex_unlock(&amp_mgr_list_lock);
+
+ return NULL;
+}
+
+void a2mp_send_getinfo_rsp(struct hci_dev *hdev)
+{
+ struct amp_mgr *mgr;
+ struct a2mp_info_rsp rsp;
+
+ mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_INFO);
+ if (!mgr)
+ return;
+
+ BT_DBG("%s mgr %p", hdev->name, mgr);
+
+ rsp.id = hdev->id;
+ rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+
+ if (hdev->amp_type != AMP_TYPE_BREDR) {
+ rsp.status = 0;
+ rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
+ rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
+ rsp.min_latency = cpu_to_le32(hdev->amp_min_latency);
+ rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap);
+ rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size);
+ }
+
+ a2mp_send(mgr, A2MP_GETINFO_RSP, mgr->ident, sizeof(rsp), &rsp);
+ amp_mgr_put(mgr);
+}
+
+void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status)
+{
+ struct amp_mgr *mgr;
+ struct amp_assoc *loc_assoc = &hdev->loc_assoc;
+ struct a2mp_amp_assoc_rsp *rsp;
+ size_t len;
+
+ mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC);
+ if (!mgr)
+ return;
+
+ BT_DBG("%s mgr %p", hdev->name, mgr);
+
+ len = sizeof(struct a2mp_amp_assoc_rsp) + loc_assoc->len;
+ rsp = kzalloc(len, GFP_KERNEL);
+ if (!rsp) {
+ amp_mgr_put(mgr);
+ return;
+ }
+
+ rsp->id = hdev->id;
+
+ if (status) {
+ rsp->status = A2MP_STATUS_INVALID_CTRL_ID;
+ } else {
+ rsp->status = A2MP_STATUS_SUCCESS;
+ memcpy(rsp->amp_assoc, loc_assoc->data, loc_assoc->len);
+ }
+
+ a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, mgr->ident, len, rsp);
+ amp_mgr_put(mgr);
+ kfree(rsp);
+}
+
+void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status)
+{
+ struct amp_mgr *mgr;
+ struct amp_assoc *loc_assoc = &hdev->loc_assoc;
+ struct a2mp_physlink_req *req;
+ struct l2cap_chan *bredr_chan;
+ size_t len;
+
+ mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC_FINAL);
+ if (!mgr)
+ return;
+
+ len = sizeof(*req) + loc_assoc->len;
+
+ BT_DBG("%s mgr %p assoc_len %zu", hdev->name, mgr, len);
+
+ req = kzalloc(len, GFP_KERNEL);
+ if (!req) {
+ amp_mgr_put(mgr);
+ return;
+ }
+
+ bredr_chan = mgr->bredr_chan;
+ if (!bredr_chan)
+ goto clean;
+
+ req->local_id = hdev->id;
+ req->remote_id = bredr_chan->remote_amp_id;
+ memcpy(req->amp_assoc, loc_assoc->data, loc_assoc->len);
+
+ a2mp_send(mgr, A2MP_CREATEPHYSLINK_REQ, __next_ident(mgr), len, req);
+
+clean:
+ amp_mgr_put(mgr);
+ kfree(req);
+}
+
+void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status)
+{
+ struct amp_mgr *mgr;
+ struct a2mp_physlink_rsp rsp;
+ struct hci_conn *hs_hcon;
+
+ mgr = amp_mgr_lookup_by_state(WRITE_REMOTE_AMP_ASSOC);
+ if (!mgr)
+ return;
+
+ hs_hcon = hci_conn_hash_lookup_state(hdev, AMP_LINK, BT_CONNECT);
+ if (!hs_hcon) {
+ rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
+ } else {
+ rsp.remote_id = hs_hcon->remote_id;
+ rsp.status = A2MP_STATUS_SUCCESS;
+ }
+
+ BT_DBG("%s mgr %p hs_hcon %p status %u", hdev->name, mgr, hs_hcon,
+ status);
+
+ rsp.local_id = hdev->id;
+ a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, mgr->ident, sizeof(rsp), &rsp);
+ amp_mgr_put(mgr);
+}
+
+void a2mp_discover_amp(struct l2cap_chan *chan)
+{
+ struct l2cap_conn *conn = chan->conn;
+ struct amp_mgr *mgr = conn->hcon->amp_mgr;
+ struct a2mp_discov_req req;
+
+ BT_DBG("chan %p conn %p mgr %p", chan, conn, mgr);
+
+ if (!mgr) {
+ mgr = amp_mgr_create(conn, true);
+ if (!mgr)
+ return;
+ }
+
+ mgr->bredr_chan = chan;
+
+ req.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
+ req.ext_feat = 0;
+ a2mp_send(mgr, A2MP_DISCOVER_REQ, 1, sizeof(req), &req);
+}
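
Every A2MP PDU built above uses the same framing: a four-byte header (code, ident, little-endian payload length) followed by the payload, as implemented by __a2mp_build() and sent by a2mp_send(). A standalone sketch that frames a Discover Request the same way, with userspace stand-ins for the kernel allocator and byte-order helpers:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define A2MP_DISCOVER_REQ 0x02

struct a2mp_cmd {
    uint8_t  code;
    uint8_t  ident;
    uint16_t len;       /* little-endian on the wire */
    uint8_t  data[];
} __attribute__((packed));

struct a2mp_discov_req {
    uint16_t mtu;       /* little-endian on the wire */
    uint16_t ext_feat;
} __attribute__((packed));

static uint16_t cpu_to_le16(uint16_t v)
{
    union { uint16_t u; uint8_t b[2]; } probe = { .u = 1 };

    return probe.b[0] ? v : (uint16_t)(v << 8 | v >> 8);
}

/* mirrors __a2mp_build(): header plus copied payload in one buffer */
static struct a2mp_cmd *a2mp_build(uint8_t code, uint8_t ident,
                                   uint16_t len, const void *data)
{
    struct a2mp_cmd *cmd = calloc(1, sizeof(*cmd) + len);

    if (!cmd)
        return NULL;

    cmd->code = code;
    cmd->ident = ident;
    cmd->len = cpu_to_le16(len);
    memcpy(cmd->data, data, len);
    return cmd;
}

int main(void)
{
    struct a2mp_discov_req req = {
        .mtu = cpu_to_le16(670),    /* L2CAP_A2MP_DEFAULT_MTU */
        .ext_feat = 0,
    };
    struct a2mp_cmd *cmd = a2mp_build(A2MP_DISCOVER_REQ, 1,
                                      sizeof(req), &req);
    size_t i, total = sizeof(*cmd) + sizeof(req);

    for (i = 0; cmd && i < total; i++)
        printf("%02x ", ((uint8_t *)cmd)[i]);
    printf("\n");
    free(cmd);
    return 0;
}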
diff --git a/net/bluetooth/a2mp.h b/net/bluetooth/a2mp.h
new file mode 100644
index 00000000000..487b54c1308
--- /dev/null
+++ b/net/bluetooth/a2mp.h
@@ -0,0 +1,150 @@
+/*
+ Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved.
+ Copyright (c) 2011,2012 Intel Corp.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 and
+ only version 2 as published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+*/
+
+#ifndef __A2MP_H
+#define __A2MP_H
+
+#include <net/bluetooth/l2cap.h>
+
+#define A2MP_FEAT_EXT 0x8000
+
+enum amp_mgr_state {
+ READ_LOC_AMP_INFO,
+ READ_LOC_AMP_ASSOC,
+ READ_LOC_AMP_ASSOC_FINAL,
+ WRITE_REMOTE_AMP_ASSOC,
+};
+
+struct amp_mgr {
+ struct list_head list;
+ struct l2cap_conn *l2cap_conn;
+ struct l2cap_chan *a2mp_chan;
+ struct l2cap_chan *bredr_chan;
+ struct kref kref;
+ __u8 ident;
+ __u8 handle;
+ unsigned long state;
+ unsigned long flags;
+
+ struct list_head amp_ctrls;
+ struct mutex amp_ctrls_lock;
+};
+
+struct a2mp_cmd {
+ __u8 code;
+ __u8 ident;
+ __le16 len;
+ __u8 data[0];
+} __packed;
+
+/* A2MP command codes */
+#define A2MP_COMMAND_REJ 0x01
+struct a2mp_cmd_rej {
+ __le16 reason;
+ __u8 data[0];
+} __packed;
+
+#define A2MP_DISCOVER_REQ 0x02
+struct a2mp_discov_req {
+ __le16 mtu;
+ __le16 ext_feat;
+} __packed;
+
+struct a2mp_cl {
+ __u8 id;
+ __u8 type;
+ __u8 status;
+} __packed;
+
+#define A2MP_DISCOVER_RSP 0x03
+struct a2mp_discov_rsp {
+ __le16 mtu;
+ __le16 ext_feat;
+ struct a2mp_cl cl[0];
+} __packed;
+
+#define A2MP_CHANGE_NOTIFY 0x04
+#define A2MP_CHANGE_RSP 0x05
+
+#define A2MP_GETINFO_REQ 0x06
+struct a2mp_info_req {
+ __u8 id;
+} __packed;
+
+#define A2MP_GETINFO_RSP 0x07
+struct a2mp_info_rsp {
+ __u8 id;
+ __u8 status;
+ __le32 total_bw;
+ __le32 max_bw;
+ __le32 min_latency;
+ __le16 pal_cap;
+ __le16 assoc_size;
+} __packed;
+
+#define A2MP_GETAMPASSOC_REQ 0x08
+struct a2mp_amp_assoc_req {
+ __u8 id;
+} __packed;
+
+#define A2MP_GETAMPASSOC_RSP 0x09
+struct a2mp_amp_assoc_rsp {
+ __u8 id;
+ __u8 status;
+ __u8 amp_assoc[0];
+} __packed;
+
+#define A2MP_CREATEPHYSLINK_REQ 0x0A
+#define A2MP_DISCONNPHYSLINK_REQ 0x0C
+struct a2mp_physlink_req {
+ __u8 local_id;
+ __u8 remote_id;
+ __u8 amp_assoc[0];
+} __packed;
+
+#define A2MP_CREATEPHYSLINK_RSP 0x0B
+#define A2MP_DISCONNPHYSLINK_RSP 0x0D
+struct a2mp_physlink_rsp {
+ __u8 local_id;
+ __u8 remote_id;
+ __u8 status;
+} __packed;
+
+/* A2MP response status */
+#define A2MP_STATUS_SUCCESS 0x00
+#define A2MP_STATUS_INVALID_CTRL_ID 0x01
+#define A2MP_STATUS_UNABLE_START_LINK_CREATION 0x02
+#define A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS 0x02
+#define A2MP_STATUS_COLLISION_OCCURED 0x03
+#define A2MP_STATUS_DISCONN_REQ_RECVD 0x04
+#define A2MP_STATUS_PHYS_LINK_EXISTS 0x05
+#define A2MP_STATUS_SECURITY_VIOLATION 0x06
+
+extern struct list_head amp_mgr_list;
+extern struct mutex amp_mgr_list_lock;
+
+struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr);
+int amp_mgr_put(struct amp_mgr *mgr);
+u8 __next_ident(struct amp_mgr *mgr);
+struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
+ struct sk_buff *skb);
+struct amp_mgr *amp_mgr_lookup_by_state(u8 state);
+void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data);
+void a2mp_discover_amp(struct l2cap_chan *chan);
+void a2mp_send_getinfo_rsp(struct hci_dev *hdev);
+void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status);
+void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status);
+void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status);
+
+#endif /* __A2MP_H */
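
The __packed structures above describe fixed wire formats whose sizes are mandated by the A2MP specification, so a compile-time size check documents the layout assumptions. A standalone sketch using C11 _Static_assert (the kernel itself would use BUILD_BUG_ON), re-declaring two of the structs with the same packing:

#include <stdint.h>

struct a2mp_info_rsp {
    uint8_t  id;
    uint8_t  status;
    uint32_t total_bw;
    uint32_t max_bw;
    uint32_t min_latency;
    uint16_t pal_cap;
    uint16_t assoc_size;
} __attribute__((packed));

struct a2mp_cl {
    uint8_t id;
    uint8_t type;
    uint8_t status;
} __attribute__((packed));

/* 1+1+4+4+4+2+2 and 1+1+1, with no padding thanks to __packed */
_Static_assert(sizeof(struct a2mp_info_rsp) == 18,
               "info rsp is 18 bytes on the wire");
_Static_assert(sizeof(struct a2mp_cl) == 3,
               "controller list entry is 3 bytes on the wire");

int main(void) { return 0; }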
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index c4cf3f59500..2021c481cdb 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -25,22 +25,13 @@
/* Bluetooth address family and sockets. */
#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <linux/poll.h>
-#include <net/sock.h>
+#include <linux/debugfs.h>
#include <asm/ioctls.h>
-#include <linux/kmod.h>
#include <net/bluetooth/bluetooth.h>
+#include <linux/proc_fs.h>
-#define VERSION "2.15"
+#define VERSION "2.19"
/* Bluetooth sockets */
#define BT_MAX_PROTO 8
@@ -71,19 +62,16 @@ static const char *const bt_slock_key_strings[BT_MAX_PROTO] = {
"slock-AF_BLUETOOTH-BTPROTO_AVDTP",
};
-static inline void bt_sock_reclassify_lock(struct socket *sock, int proto)
+void bt_sock_reclassify_lock(struct sock *sk, int proto)
{
- struct sock *sk = sock->sk;
-
- if (!sk)
- return;
-
+ BUG_ON(!sk);
BUG_ON(sock_owned_by_user(sk));
sock_lock_init_class_and_name(sk,
bt_slock_key_strings[proto], &bt_slock_key[proto],
bt_key_strings[proto], &bt_lock_key[proto]);
}
+EXPORT_SYMBOL(bt_sock_reclassify_lock);
int bt_sock_register(int proto, const struct net_proto_family *ops)
{
@@ -105,23 +93,14 @@ int bt_sock_register(int proto, const struct net_proto_family *ops)
}
EXPORT_SYMBOL(bt_sock_register);
-int bt_sock_unregister(int proto)
+void bt_sock_unregister(int proto)
{
- int err = 0;
-
if (proto < 0 || proto >= BT_MAX_PROTO)
- return -EINVAL;
+ return;
write_lock(&bt_proto_lock);
-
- if (!bt_proto[proto])
- err = -ENOENT;
- else
- bt_proto[proto] = NULL;
-
+ bt_proto[proto] = NULL;
write_unlock(&bt_proto_lock);
-
- return err;
}
EXPORT_SYMBOL(bt_sock_unregister);
@@ -145,7 +124,8 @@ static int bt_sock_create(struct net *net, struct socket *sock, int proto,
if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) {
err = bt_proto[proto]->create(net, sock, proto, kern);
- bt_sock_reclassify_lock(sock, proto);
+ if (!err)
+ bt_sock_reclassify_lock(sock->sk, proto);
module_put(bt_proto[proto]->owner);
}
@@ -156,17 +136,17 @@ static int bt_sock_create(struct net *net, struct socket *sock, int proto,
void bt_sock_link(struct bt_sock_list *l, struct sock *sk)
{
- write_lock_bh(&l->lock);
+ write_lock(&l->lock);
sk_add_node(sk, &l->head);
- write_unlock_bh(&l->lock);
+ write_unlock(&l->lock);
}
EXPORT_SYMBOL(bt_sock_link);
void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
{
- write_lock_bh(&l->lock);
+ write_lock(&l->lock);
sk_del_node_init(sk);
- write_unlock_bh(&l->lock);
+ write_unlock(&l->lock);
}
EXPORT_SYMBOL(bt_sock_unlink);
@@ -212,16 +192,18 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
}
if (sk->sk_state == BT_CONNECTED || !newsock ||
- bt_sk(parent)->defer_setup) {
+ test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) {
bt_accept_unlink(sk);
if (newsock)
sock_graft(sk, newsock);
+
release_sock(sk);
return sk;
}
release_sock(sk);
}
+
return NULL;
}
EXPORT_SYMBOL(bt_accept_dequeue);
@@ -240,14 +222,14 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
if (flags & (MSG_OOB))
return -EOPNOTSUPP;
- if (!(skb = skb_recv_datagram(sk, flags, noblock, &err))) {
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb) {
if (sk->sk_shutdown & RCV_SHUTDOWN)
return 0;
+
return err;
}
- msg->msg_namelen = 0;
-
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
@@ -256,9 +238,14 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
skb_reset_transport_header(skb);
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
- if (err == 0)
+ if (err == 0) {
sock_recv_ts_and_drops(msg, sk, skb);
+ if (bt_sk(sk)->skb_msg_name)
+ bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
+ &msg->msg_namelen);
+ }
+
skb_free_datagram(sk, skb);
return err ? : copied;
@@ -305,8 +292,6 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
if (flags & MSG_OOB)
return -EOPNOTSUPP;
- msg->msg_namelen = 0;
-
BT_DBG("sk %p size %zu", sk, size);
lock_sock(sk);
@@ -323,7 +308,8 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
if (copied >= target)
break;
- if ((err = sock_error(sk)) != 0)
+ err = sock_error(sk);
+ if (err)
break;
if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
@@ -342,7 +328,7 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
}
chunk = min_t(unsigned int, skb->len, size);
- if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
+ if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, chunk)) {
skb_queue_head(&sk->sk_receive_queue, skb);
if (!copied)
copied = -EFAULT;
@@ -354,7 +340,33 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
sock_recv_ts_and_drops(msg, sk, skb);
if (!(flags & MSG_PEEK)) {
- skb_pull(skb, chunk);
+ int skb_len = skb_headlen(skb);
+
+ if (chunk <= skb_len) {
+ __skb_pull(skb, chunk);
+ } else {
+ struct sk_buff *frag;
+
+ __skb_pull(skb, skb_len);
+ chunk -= skb_len;
+
+ skb_walk_frags(skb, frag) {
+ if (chunk <= frag->len) {
+ /* Pulling partial data */
+ skb->len -= chunk;
+ skb->data_len -= chunk;
+ __skb_pull(frag, chunk);
+ break;
+ } else if (frag->len) {
+ /* Pulling all frag data */
+ chunk -= frag->len;
+ skb->len -= frag->len;
+ skb->data_len -= frag->len;
+ __skb_pull(frag, frag->len);
+ }
+ }
+ }
+
if (skb->len) {
skb_queue_head(&sk->sk_receive_queue, skb);
break;
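
Note on the hunk above: it replaces a plain skb_pull() because the stream receive path may now see non-linear skbs, so the consumed chunk is drained from the linear head first and then from each fragment in order, keeping skb->len and skb->data_len in step. A minimal userspace model of that walk, with struct buf as a hypothetical stand-in for the head-plus-frag chain (not kernel API):

#include <stddef.h>

struct buf {
	size_t len;		/* bytes left in this segment */
	struct buf *next;	/* next fragment, NULL at the end */
};

/* Consume `chunk` bytes: linear head first, then each fragment. */
static void pull(struct buf *head, size_t chunk)
{
	size_t take = chunk < head->len ? chunk : head->len;
	struct buf *f;

	head->len -= take;	/* linear head first */
	chunk -= take;

	for (f = head->next; f && chunk; f = f->next) {
		take = chunk < f->len ? chunk : f->len;
		f->len -= take;	/* whole frag, or partial on the last one */
		chunk -= take;
	}
}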
@@ -382,15 +394,16 @@ static inline unsigned int bt_accept_poll(struct sock *parent)
list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
if (sk->sk_state == BT_CONNECTED ||
- (bt_sk(parent)->defer_setup &&
- sk->sk_state == BT_CONNECT2))
+ (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) &&
+ sk->sk_state == BT_CONNECT2))
return POLLIN | POLLRDNORM;
}
return 0;
}
-unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *wait)
+unsigned int bt_sock_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
{
struct sock *sk = sock->sk;
unsigned int mask = 0;
@@ -403,7 +416,8 @@ unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *w
return bt_accept_poll(sk);
if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
- mask |= POLLERR;
+ mask |= POLLERR |
+ (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
if (sk->sk_shutdown & RCV_SHUTDOWN)
mask |= POLLRDHUP | POLLIN | POLLRDNORM;
@@ -422,7 +436,7 @@ unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *w
sk->sk_state == BT_CONFIG)
return mask;
- if (sock_writeable(sk))
+ if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
@@ -479,6 +493,7 @@ int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
}
EXPORT_SYMBOL(bt_sock_ioctl);
+/* This function expects the sk lock to be held when called */
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
{
DECLARE_WAITQUEUE(wait, current);
@@ -487,9 +502,8 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
BT_DBG("sk %p", sk);
add_wait_queue(sk_sleep(sk), &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
while (sk->sk_state != state) {
- set_current_state(TASK_INTERRUPTIBLE);
-
if (!timeo) {
err = -EINPROGRESS;
break;
@@ -503,29 +517,204 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
+ set_current_state(TASK_INTERRUPTIBLE);
err = sock_error(sk);
if (err)
break;
}
- set_current_state(TASK_RUNNING);
+ __set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
return err;
}
EXPORT_SYMBOL(bt_sock_wait_state);
+/* This function expects the sk lock to be held when called */
+int bt_sock_wait_ready(struct sock *sk, unsigned long flags)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ unsigned long timeo;
+ int err = 0;
+
+ BT_DBG("sk %p", sk);
+
+ timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
+
+ add_wait_queue(sk_sleep(sk), &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags)) {
+ if (!timeo) {
+ err = -EAGAIN;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ err = sock_intr_errno(timeo);
+ break;
+ }
+
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock(sk);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ err = sock_error(sk);
+ if (err)
+ break;
+ }
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(sk_sleep(sk), &wait);
+
+ return err;
+}
+EXPORT_SYMBOL(bt_sock_wait_ready);
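
No caller of bt_sock_wait_ready() appears in this hunk; a minimal sketch of how a protocol's send path might use it, taking the socket lock first (the helper's documented precondition). The function name and surrounding logic are illustrative only:

static int foo_sock_sendmsg(struct socket *sock, struct msghdr *msg)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	/* Per the helper above, O_NONBLOCK in flags selects a
	 * non-blocking wait; blocking callers honour sk_sndtimeo. */
	err = bt_sock_wait_ready(sk, sock->file->f_flags);

	release_sock(sk);
	if (err)
		return err;

	/* ... BT_SK_SUSPEND cleared, safe to queue msg data ... */
	return 0;
}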
+
+#ifdef CONFIG_PROC_FS
+struct bt_seq_state {
+ struct bt_sock_list *l;
+};
+
+static void *bt_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(seq->private->l->lock)
+{
+ struct bt_seq_state *s = seq->private;
+ struct bt_sock_list *l = s->l;
+
+ read_lock(&l->lock);
+ return seq_hlist_start_head(&l->head, *pos);
+}
+
+static void *bt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct bt_seq_state *s = seq->private;
+ struct bt_sock_list *l = s->l;
+
+ return seq_hlist_next(v, &l->head, pos);
+}
+
+static void bt_seq_stop(struct seq_file *seq, void *v)
+ __releases(seq->private->l->lock)
+{
+ struct bt_seq_state *s = seq->private;
+ struct bt_sock_list *l = s->l;
+
+ read_unlock(&l->lock);
+}
+
+static int bt_seq_show(struct seq_file *seq, void *v)
+{
+ struct bt_seq_state *s = seq->private;
+ struct bt_sock_list *l = s->l;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq, "sk RefCnt Rmem Wmem User Inode Parent");
+
+ if (l->custom_seq_show) {
+ seq_putc(seq, ' ');
+ l->custom_seq_show(seq, v);
+ }
+
+ seq_putc(seq, '\n');
+ } else {
+ struct sock *sk = sk_entry(v);
+ struct bt_sock *bt = bt_sk(sk);
+
+ seq_printf(seq,
+ "%pK %-6d %-6u %-6u %-6u %-6lu %-6lu",
+ sk,
+ atomic_read(&sk->sk_refcnt),
+ sk_rmem_alloc_get(sk),
+ sk_wmem_alloc_get(sk),
+ from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
+ sock_i_ino(sk),
+ bt->parent ? sock_i_ino(bt->parent) : 0LU);
+
+ if (l->custom_seq_show) {
+ seq_putc(seq, ' ');
+ l->custom_seq_show(seq, v);
+ }
+
+ seq_putc(seq, '\n');
+ }
+ return 0;
+}
+
+static const struct seq_operations bt_seq_ops = {
+ .start = bt_seq_start,
+ .next = bt_seq_next,
+ .stop = bt_seq_stop,
+ .show = bt_seq_show,
+};
+
+static int bt_seq_open(struct inode *inode, struct file *file)
+{
+ struct bt_sock_list *sk_list;
+ struct bt_seq_state *s;
+
+ sk_list = PDE_DATA(inode);
+ s = __seq_open_private(file, &bt_seq_ops,
+ sizeof(struct bt_seq_state));
+ if (!s)
+ return -ENOMEM;
+
+ s->l = sk_list;
+ return 0;
+}
+
+static const struct file_operations bt_fops = {
+ .open = bt_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private
+};
+
+int bt_procfs_init(struct net *net, const char *name,
+ struct bt_sock_list *sk_list,
+ int (*seq_show)(struct seq_file *, void *))
+{
+ sk_list->custom_seq_show = seq_show;
+
+ if (!proc_create_data(name, 0, net->proc_net, &bt_fops, sk_list))
+ return -ENOMEM;
+ return 0;
+}
+
+void bt_procfs_cleanup(struct net *net, const char *name)
+{
+ remove_proc_entry(name, net->proc_net);
+}
+#else
+int bt_procfs_init(struct net *net, const char *name,
+ struct bt_sock_list *sk_list,
+ int (*seq_show)(struct seq_file *, void *))
+{
+ return 0;
+}
+
+void bt_procfs_cleanup(struct net *net, const char *name)
+{
+}
+#endif
+EXPORT_SYMBOL(bt_procfs_init);
+EXPORT_SYMBOL(bt_procfs_cleanup);
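
The optional callback lets a protocol append its own columns after the common ones; it is invoked once with SEQ_START_TOKEN for the header line and once per socket. A minimal sketch for a hypothetical protocol (the BNEP and CMTP callers later in this series simply pass NULL):

static struct bt_sock_list foo_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(foo_sk_list.lock)
};

static int foo_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "State");		/* extra header column */
	else
		seq_printf(seq, "%d", sk_entry(v)->sk_state);
	return 0;
}

/* in the protocol's init path: */
err = bt_procfs_init(&init_net, "foo", &foo_sk_list, foo_seq_show);
if (err < 0)
	BT_ERR("Failed to create foo proc file");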
+
static struct net_proto_family bt_sock_family_ops = {
.owner = THIS_MODULE,
.family = PF_BLUETOOTH,
.create = bt_sock_create,
};
+struct dentry *bt_debugfs;
+EXPORT_SYMBOL_GPL(bt_debugfs);
+
static int __init bt_init(void)
{
int err;
BT_INFO("Core ver %s", VERSION);
+ bt_debugfs = debugfs_create_dir("bluetooth", NULL);
+
err = bt_sysfs_init();
if (err < 0)
return err;
@@ -538,18 +727,45 @@ static int __init bt_init(void)
BT_INFO("HCI device and connection manager initialized");
- hci_sock_init();
+ err = hci_sock_init();
+ if (err < 0)
+ goto error;
+
+ err = l2cap_init();
+ if (err < 0)
+ goto sock_err;
+
+ err = sco_init();
+ if (err < 0) {
+ l2cap_exit();
+ goto sock_err;
+ }
return 0;
+
+sock_err:
+ hci_sock_cleanup();
+
+error:
+ sock_unregister(PF_BLUETOOTH);
+ bt_sysfs_cleanup();
+
+ return err;
}
static void __exit bt_exit(void)
{
+ sco_exit();
+
+ l2cap_exit();
+
hci_sock_cleanup();
sock_unregister(PF_BLUETOOTH);
bt_sysfs_cleanup();
+
+ debugfs_remove_recursive(bt_debugfs);
}
subsys_initcall(bt_init);
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
new file mode 100644
index 00000000000..bb39509b3f0
--- /dev/null
+++ b/net/bluetooth/amp.c
@@ -0,0 +1,468 @@
+/*
+ Copyright (c) 2011,2012 Intel Corp.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 and
+ only version 2 as published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+*/
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci.h>
+#include <net/bluetooth/hci_core.h>
+#include <crypto/hash.h>
+
+#include "a2mp.h"
+#include "amp.h"
+
+/* Remote AMP Controllers interface */
+void amp_ctrl_get(struct amp_ctrl *ctrl)
+{
+ BT_DBG("ctrl %p orig refcnt %d", ctrl,
+ atomic_read(&ctrl->kref.refcount));
+
+ kref_get(&ctrl->kref);
+}
+
+static void amp_ctrl_destroy(struct kref *kref)
+{
+ struct amp_ctrl *ctrl = container_of(kref, struct amp_ctrl, kref);
+
+ BT_DBG("ctrl %p", ctrl);
+
+ kfree(ctrl->assoc);
+ kfree(ctrl);
+}
+
+int amp_ctrl_put(struct amp_ctrl *ctrl)
+{
+ BT_DBG("ctrl %p orig refcnt %d", ctrl,
+ atomic_read(&ctrl->kref.refcount));
+
+ return kref_put(&ctrl->kref, &amp_ctrl_destroy);
+}
+
+struct amp_ctrl *amp_ctrl_add(struct amp_mgr *mgr, u8 id)
+{
+ struct amp_ctrl *ctrl;
+
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return NULL;
+
+ kref_init(&ctrl->kref);
+ ctrl->id = id;
+
+ mutex_lock(&mgr->amp_ctrls_lock);
+ list_add(&ctrl->list, &mgr->amp_ctrls);
+ mutex_unlock(&mgr->amp_ctrls_lock);
+
+ BT_DBG("mgr %p ctrl %p", mgr, ctrl);
+
+ return ctrl;
+}
+
+void amp_ctrl_list_flush(struct amp_mgr *mgr)
+{
+ struct amp_ctrl *ctrl, *n;
+
+ BT_DBG("mgr %p", mgr);
+
+ mutex_lock(&mgr->amp_ctrls_lock);
+ list_for_each_entry_safe(ctrl, n, &mgr->amp_ctrls, list) {
+ list_del(&ctrl->list);
+ amp_ctrl_put(ctrl);
+ }
+ mutex_unlock(&mgr->amp_ctrls_lock);
+}
+
+struct amp_ctrl *amp_ctrl_lookup(struct amp_mgr *mgr, u8 id)
+{
+ struct amp_ctrl *ctrl;
+
+ BT_DBG("mgr %p id %d", mgr, id);
+
+ mutex_lock(&mgr->amp_ctrls_lock);
+ list_for_each_entry(ctrl, &mgr->amp_ctrls, list) {
+ if (ctrl->id == id) {
+ amp_ctrl_get(ctrl);
+ mutex_unlock(&mgr->amp_ctrls_lock);
+ return ctrl;
+ }
+ }
+ mutex_unlock(&mgr->amp_ctrls_lock);
+
+ return NULL;
+}
+
+/* Physical Link interface */
+static u8 __next_handle(struct amp_mgr *mgr)
+{
+ if (++mgr->handle == 0)
+ mgr->handle = 1;
+
+ return mgr->handle;
+}
+
+struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
+ u8 remote_id, bool out)
+{
+ bdaddr_t *dst = &mgr->l2cap_conn->hcon->dst;
+ struct hci_conn *hcon;
+
+ hcon = hci_conn_add(hdev, AMP_LINK, dst);
+ if (!hcon)
+ return NULL;
+
+ BT_DBG("hcon %p dst %pMR", hcon, dst);
+
+ hcon->state = BT_CONNECT;
+ hcon->attempt++;
+ hcon->handle = __next_handle(mgr);
+ hcon->remote_id = remote_id;
+ hcon->amp_mgr = amp_mgr_get(mgr);
+ hcon->out = out;
+
+ return hcon;
+}
+
+/* AMP crypto key generation interface */
+static int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize, u8 *output)
+{
+ int ret = 0;
+ struct crypto_shash *tfm;
+
+ if (!ksize)
+ return -EINVAL;
+
+ tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
+ if (IS_ERR(tfm)) {
+ BT_DBG("crypto_alloc_ahash failed: err %ld", PTR_ERR(tfm));
+ return PTR_ERR(tfm);
+ }
+
+ ret = crypto_shash_setkey(tfm, key, ksize);
+ if (ret) {
+ BT_DBG("crypto_ahash_setkey failed: err %d", ret);
+ } else {
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(tfm)];
+ } desc;
+
+ desc.shash.tfm = tfm;
+ desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ ret = crypto_shash_digest(&desc.shash, plaintext, psize,
+ output);
+ }
+
+ crypto_free_shash(tfm);
+ return ret;
+}
+
+int phylink_gen_key(struct hci_conn *conn, u8 *data, u8 *len, u8 *type)
+{
+ struct hci_dev *hdev = conn->hdev;
+ struct link_key *key;
+ u8 keybuf[HCI_AMP_LINK_KEY_SIZE];
+ u8 gamp_key[HCI_AMP_LINK_KEY_SIZE];
+ int err;
+
+ if (!hci_conn_check_link_mode(conn))
+ return -EACCES;
+
+ BT_DBG("conn %p key_type %d", conn, conn->key_type);
+
+ /* Legacy key */
+ if (conn->key_type < 3) {
+ BT_ERR("Legacy key type %d", conn->key_type);
+ return -EACCES;
+ }
+
+ *type = conn->key_type;
+ *len = HCI_AMP_LINK_KEY_SIZE;
+
+ key = hci_find_link_key(hdev, &conn->dst);
+ if (!key) {
+ BT_DBG("No Link key for conn %p dst %pMR", conn, &conn->dst);
+ return -EACCES;
+ }
+
+ /* BR/EDR Link Key concatenated together with itself */
+ memcpy(&keybuf[0], key->val, HCI_LINK_KEY_SIZE);
+ memcpy(&keybuf[HCI_LINK_KEY_SIZE], key->val, HCI_LINK_KEY_SIZE);
+
+ /* Derive Generic AMP Link Key (gamp) */
+ err = hmac_sha256(keybuf, HCI_AMP_LINK_KEY_SIZE, "gamp", 4, gamp_key);
+ if (err) {
+ BT_ERR("Could not derive Generic AMP Key: err %d", err);
+ return err;
+ }
+
+ if (conn->key_type == HCI_LK_DEBUG_COMBINATION) {
+ BT_DBG("Use Generic AMP Key (gamp)");
+ memcpy(data, gamp_key, HCI_AMP_LINK_KEY_SIZE);
+ return err;
+ }
+
+ /* Derive Dedicated AMP Link Key: "802b" is 802.11 PAL keyID */
+ return hmac_sha256(gamp_key, HCI_AMP_LINK_KEY_SIZE, "802b", 4, data);
+}
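
Summarising the derivation chain in phylink_gen_key() above (the key is the first argument to hmac_sha256()):

/*
 * keybuf   = LK || LK                        32 bytes: BR/EDR link key
 *                                            concatenated with itself
 * gamp_key = HMAC-SHA256(keybuf,   "gamp")   Generic AMP Link Key
 * data     = HMAC-SHA256(gamp_key, "802b")   Dedicated 802.11 PAL key
 *
 * Debug-combination keys return gamp_key directly; all other
 * accepted key types continue to the dedicated key.
 */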
+
+void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle)
+{
+ struct hci_cp_read_local_amp_assoc cp;
+ struct amp_assoc *loc_assoc = &hdev->loc_assoc;
+
+ BT_DBG("%s handle %d", hdev->name, phy_handle);
+
+ cp.phy_handle = phy_handle;
+ cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
+ cp.len_so_far = cpu_to_le16(loc_assoc->offset);
+
+ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+}
+
+void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr)
+{
+ struct hci_cp_read_local_amp_assoc cp;
+
+ memset(&hdev->loc_assoc, 0, sizeof(struct amp_assoc));
+ memset(&cp, 0, sizeof(cp));
+
+ cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
+
+ set_bit(READ_LOC_AMP_ASSOC, &mgr->state);
+ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+}
+
+void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
+ struct hci_conn *hcon)
+{
+ struct hci_cp_read_local_amp_assoc cp;
+ struct amp_mgr *mgr = hcon->amp_mgr;
+
+ cp.phy_handle = hcon->handle;
+ cp.len_so_far = cpu_to_le16(0);
+ cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
+
+ set_bit(READ_LOC_AMP_ASSOC_FINAL, &mgr->state);
+
+ /* Read Local AMP Assoc final link information data */
+ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+}
+
+/* Write AMP Assoc data fragments; returns true once the last fragment is written */
+static bool amp_write_rem_assoc_frag(struct hci_dev *hdev,
+ struct hci_conn *hcon)
+{
+ struct hci_cp_write_remote_amp_assoc *cp;
+ struct amp_mgr *mgr = hcon->amp_mgr;
+ struct amp_ctrl *ctrl;
+ u16 frag_len, len;
+
+ ctrl = amp_ctrl_lookup(mgr, hcon->remote_id);
+ if (!ctrl)
+ return false;
+
+ if (!ctrl->assoc_rem_len) {
+ BT_DBG("all fragments are written");
+ ctrl->assoc_rem_len = ctrl->assoc_len;
+ ctrl->assoc_len_so_far = 0;
+
+ amp_ctrl_put(ctrl);
+ return true;
+ }
+
+ frag_len = min_t(u16, 248, ctrl->assoc_rem_len);
+ len = frag_len + sizeof(*cp);
+
+ cp = kzalloc(len, GFP_KERNEL);
+ if (!cp) {
+ amp_ctrl_put(ctrl);
+ return false;
+ }
+
+ BT_DBG("hcon %p ctrl %p frag_len %u assoc_len %u rem_len %u",
+ hcon, ctrl, frag_len, ctrl->assoc_len, ctrl->assoc_rem_len);
+
+ cp->phy_handle = hcon->handle;
+ cp->len_so_far = cpu_to_le16(ctrl->assoc_len_so_far);
+ cp->rem_len = cpu_to_le16(ctrl->assoc_rem_len);
+ memcpy(cp->frag, ctrl->assoc, frag_len);
+
+ ctrl->assoc_len_so_far += frag_len;
+ ctrl->assoc_rem_len -= frag_len;
+
+ amp_ctrl_put(ctrl);
+
+ hci_send_cmd(hdev, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp);
+
+ kfree(cp);
+
+ return false;
+}
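
Each call emits at most one 248-byte fragment; the terminating call, which finds rem_len already at zero, resets the counters and reports completion. Walking through a hypothetical 600-byte assoc:

/*
 * call 1: frag_len = min(248, 600) = 248, len_so_far = 248, rem = 352
 * call 2: frag_len = min(248, 352) = 248, len_so_far = 496, rem = 104
 * call 3: frag_len = min(248, 104) = 104, len_so_far = 600, rem = 0
 * call 4: rem == 0 -> counters reset, returns true ("all written")
 */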
+
+void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle)
+{
+ struct hci_conn *hcon;
+
+ BT_DBG("%s phy handle 0x%2.2x", hdev->name, handle);
+
+ hcon = hci_conn_hash_lookup_handle(hdev, handle);
+ if (!hcon)
+ return;
+
+ /* Send A2MP create phylink rsp when all fragments are written */
+ if (amp_write_rem_assoc_frag(hdev, hcon))
+ a2mp_send_create_phy_link_rsp(hdev, 0);
+}
+
+void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle)
+{
+ struct hci_conn *hcon;
+
+ BT_DBG("%s phy handle 0x%2.2x", hdev->name, handle);
+
+ hcon = hci_conn_hash_lookup_handle(hdev, handle);
+ if (!hcon)
+ return;
+
+ BT_DBG("%s phy handle 0x%2.2x hcon %p", hdev->name, handle, hcon);
+
+ amp_write_rem_assoc_frag(hdev, hcon);
+}
+
+void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
+ struct hci_conn *hcon)
+{
+ struct hci_cp_create_phy_link cp;
+
+ cp.phy_handle = hcon->handle;
+
+ BT_DBG("%s hcon %p phy handle 0x%2.2x", hdev->name, hcon,
+ hcon->handle);
+
+ if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len,
+ &cp.key_type)) {
+ BT_DBG("Cannot create link key");
+ return;
+ }
+
+ hci_send_cmd(hdev, HCI_OP_CREATE_PHY_LINK, sizeof(cp), &cp);
+}
+
+void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
+ struct hci_conn *hcon)
+{
+ struct hci_cp_accept_phy_link cp;
+
+ cp.phy_handle = hcon->handle;
+
+ BT_DBG("%s hcon %p phy handle 0x%2.2x", hdev->name, hcon,
+ hcon->handle);
+
+ if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len,
+ &cp.key_type)) {
+ BT_DBG("Cannot create link key");
+ return;
+ }
+
+ hci_send_cmd(hdev, HCI_OP_ACCEPT_PHY_LINK, sizeof(cp), &cp);
+}
+
+void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon)
+{
+ struct hci_dev *bredr_hdev = hci_dev_hold(bredr_hcon->hdev);
+ struct amp_mgr *mgr = hs_hcon->amp_mgr;
+ struct l2cap_chan *bredr_chan;
+
+ BT_DBG("bredr_hcon %p hs_hcon %p mgr %p", bredr_hcon, hs_hcon, mgr);
+
+ if (!bredr_hdev || !mgr || !mgr->bredr_chan)
+ return;
+
+ bredr_chan = mgr->bredr_chan;
+
+ l2cap_chan_lock(bredr_chan);
+
+ set_bit(FLAG_EFS_ENABLE, &bredr_chan->flags);
+ bredr_chan->remote_amp_id = hs_hcon->remote_id;
+ bredr_chan->local_amp_id = hs_hcon->hdev->id;
+ bredr_chan->hs_hcon = hs_hcon;
+ bredr_chan->conn->mtu = hs_hcon->hdev->block_mtu;
+
+ __l2cap_physical_cfm(bredr_chan, 0);
+
+ l2cap_chan_unlock(bredr_chan);
+
+ hci_dev_put(bredr_hdev);
+}
+
+void amp_create_logical_link(struct l2cap_chan *chan)
+{
+ struct hci_conn *hs_hcon = chan->hs_hcon;
+ struct hci_cp_create_accept_logical_link cp;
+ struct hci_dev *hdev;
+
+ BT_DBG("chan %p hs_hcon %p dst %pMR", chan, hs_hcon,
+ &chan->conn->hcon->dst);
+
+ if (!hs_hcon)
+ return;
+
+ hdev = hci_dev_hold(chan->hs_hcon->hdev);
+ if (!hdev)
+ return;
+
+ cp.phy_handle = hs_hcon->handle;
+
+ cp.tx_flow_spec.id = chan->local_id;
+ cp.tx_flow_spec.stype = chan->local_stype;
+ cp.tx_flow_spec.msdu = cpu_to_le16(chan->local_msdu);
+ cp.tx_flow_spec.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
+ cp.tx_flow_spec.acc_lat = cpu_to_le32(chan->local_acc_lat);
+ cp.tx_flow_spec.flush_to = cpu_to_le32(chan->local_flush_to);
+
+ cp.rx_flow_spec.id = chan->remote_id;
+ cp.rx_flow_spec.stype = chan->remote_stype;
+ cp.rx_flow_spec.msdu = cpu_to_le16(chan->remote_msdu);
+ cp.rx_flow_spec.sdu_itime = cpu_to_le32(chan->remote_sdu_itime);
+ cp.rx_flow_spec.acc_lat = cpu_to_le32(chan->remote_acc_lat);
+ cp.rx_flow_spec.flush_to = cpu_to_le32(chan->remote_flush_to);
+
+ if (hs_hcon->out)
+ hci_send_cmd(hdev, HCI_OP_CREATE_LOGICAL_LINK, sizeof(cp),
+ &cp);
+ else
+ hci_send_cmd(hdev, HCI_OP_ACCEPT_LOGICAL_LINK, sizeof(cp),
+ &cp);
+
+ hci_dev_put(hdev);
+}
+
+void amp_disconnect_logical_link(struct hci_chan *hchan)
+{
+ struct hci_conn *hcon = hchan->conn;
+ struct hci_cp_disconn_logical_link cp;
+
+ if (hcon->state != BT_CONNECTED) {
+ BT_DBG("hchan %p not connected", hchan);
+ return;
+ }
+
+ cp.log_handle = cpu_to_le16(hchan->handle);
+ hci_send_cmd(hcon->hdev, HCI_OP_DISCONN_LOGICAL_LINK, sizeof(cp), &cp);
+}
+
+void amp_destroy_logical_link(struct hci_chan *hchan, u8 reason)
+{
+ BT_DBG("hchan %p", hchan);
+
+ hci_chan_del(hchan);
+}
diff --git a/net/bluetooth/amp.h b/net/bluetooth/amp.h
new file mode 100644
index 00000000000..7ea3db77ba8
--- /dev/null
+++ b/net/bluetooth/amp.h
@@ -0,0 +1,54 @@
+/*
+ Copyright (c) 2011,2012 Intel Corp.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 and
+ only version 2 as published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+*/
+
+#ifndef __AMP_H
+#define __AMP_H
+
+struct amp_ctrl {
+ struct list_head list;
+ struct kref kref;
+ __u8 id;
+ __u16 assoc_len_so_far;
+ __u16 assoc_rem_len;
+ __u16 assoc_len;
+ __u8 *assoc;
+};
+
+int amp_ctrl_put(struct amp_ctrl *ctrl);
+void amp_ctrl_get(struct amp_ctrl *ctrl);
+struct amp_ctrl *amp_ctrl_add(struct amp_mgr *mgr, u8 id);
+struct amp_ctrl *amp_ctrl_lookup(struct amp_mgr *mgr, u8 id);
+void amp_ctrl_list_flush(struct amp_mgr *mgr);
+
+struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
+ u8 remote_id, bool out);
+
+int phylink_gen_key(struct hci_conn *hcon, u8 *data, u8 *len, u8 *type);
+
+void amp_read_loc_info(struct hci_dev *hdev, struct amp_mgr *mgr);
+void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle);
+void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr);
+void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
+ struct hci_conn *hcon);
+void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
+ struct hci_conn *hcon);
+void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
+ struct hci_conn *hcon);
+void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle);
+void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle);
+void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon);
+void amp_create_logical_link(struct l2cap_chan *chan);
+void amp_disconnect_logical_link(struct hci_chan *hchan);
+void amp_destroy_logical_link(struct hci_chan *hchan, u8 reason);
+
+#endif /* __AMP_H */
diff --git a/net/bluetooth/bnep/Kconfig b/net/bluetooth/bnep/Kconfig
index 35158b036d5..71791fc9f6b 100644
--- a/net/bluetooth/bnep/Kconfig
+++ b/net/bluetooth/bnep/Kconfig
@@ -1,6 +1,6 @@
config BT_BNEP
tristate "BNEP protocol support"
- depends on BT && BT_L2CAP
+ depends on BT
select CRC32
help
BNEP (Bluetooth Network Encapsulation Protocol) is Ethernet
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index 70672544db8..5a5b16f365e 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -12,8 +12,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _BNEP_H
@@ -23,88 +22,88 @@
#include <linux/crc32.h>
#include <net/bluetooth/bluetooth.h>
-// Limits
-#define BNEP_MAX_PROTO_FILTERS 5
-#define BNEP_MAX_MULTICAST_FILTERS 20
-
-// UUIDs
-#define BNEP_BASE_UUID 0x0000000000001000800000805F9B34FB
-#define BNEP_UUID16 0x02
-#define BNEP_UUID32 0x04
-#define BNEP_UUID128 0x16
-
-#define BNEP_SVC_PANU 0x1115
-#define BNEP_SVC_NAP 0x1116
-#define BNEP_SVC_GN 0x1117
-
-// Packet types
-#define BNEP_GENERAL 0x00
-#define BNEP_CONTROL 0x01
-#define BNEP_COMPRESSED 0x02
-#define BNEP_COMPRESSED_SRC_ONLY 0x03
-#define BNEP_COMPRESSED_DST_ONLY 0x04
-
-// Control types
-#define BNEP_CMD_NOT_UNDERSTOOD 0x00
-#define BNEP_SETUP_CONN_REQ 0x01
-#define BNEP_SETUP_CONN_RSP 0x02
-#define BNEP_FILTER_NET_TYPE_SET 0x03
-#define BNEP_FILTER_NET_TYPE_RSP 0x04
-#define BNEP_FILTER_MULTI_ADDR_SET 0x05
-#define BNEP_FILTER_MULTI_ADDR_RSP 0x06
-
-// Extension types
-#define BNEP_EXT_CONTROL 0x00
-
-// Response messages
-#define BNEP_SUCCESS 0x00
-
-#define BNEP_CONN_INVALID_DST 0x01
-#define BNEP_CONN_INVALID_SRC 0x02
-#define BNEP_CONN_INVALID_SVC 0x03
-#define BNEP_CONN_NOT_ALLOWED 0x04
-
-#define BNEP_FILTER_UNSUPPORTED_REQ 0x01
-#define BNEP_FILTER_INVALID_RANGE 0x02
-#define BNEP_FILTER_INVALID_MCADDR 0x02
-#define BNEP_FILTER_LIMIT_REACHED 0x03
-#define BNEP_FILTER_DENIED_SECURITY 0x04
-
-// L2CAP settings
-#define BNEP_MTU 1691
-#define BNEP_PSM 0x0f
-#define BNEP_FLUSH_TO 0xffff
-#define BNEP_CONNECT_TO 15
-#define BNEP_FILTER_TO 15
-
-// Headers
-#define BNEP_TYPE_MASK 0x7f
-#define BNEP_EXT_HEADER 0x80
+/* Limits */
+#define BNEP_MAX_PROTO_FILTERS 5
+#define BNEP_MAX_MULTICAST_FILTERS 20
+
+/* UUIDs */
+#define BNEP_BASE_UUID 0x0000000000001000800000805F9B34FB
+#define BNEP_UUID16 0x02
+#define BNEP_UUID32 0x04
+#define BNEP_UUID128 0x16
+
+#define BNEP_SVC_PANU 0x1115
+#define BNEP_SVC_NAP 0x1116
+#define BNEP_SVC_GN 0x1117
+
+/* Packet types */
+#define BNEP_GENERAL 0x00
+#define BNEP_CONTROL 0x01
+#define BNEP_COMPRESSED 0x02
+#define BNEP_COMPRESSED_SRC_ONLY 0x03
+#define BNEP_COMPRESSED_DST_ONLY 0x04
+
+/* Control types */
+#define BNEP_CMD_NOT_UNDERSTOOD 0x00
+#define BNEP_SETUP_CONN_REQ 0x01
+#define BNEP_SETUP_CONN_RSP 0x02
+#define BNEP_FILTER_NET_TYPE_SET 0x03
+#define BNEP_FILTER_NET_TYPE_RSP 0x04
+#define BNEP_FILTER_MULTI_ADDR_SET 0x05
+#define BNEP_FILTER_MULTI_ADDR_RSP 0x06
+
+/* Extension types */
+#define BNEP_EXT_CONTROL 0x00
+
+/* Response messages */
+#define BNEP_SUCCESS 0x00
+
+#define BNEP_CONN_INVALID_DST 0x01
+#define BNEP_CONN_INVALID_SRC 0x02
+#define BNEP_CONN_INVALID_SVC 0x03
+#define BNEP_CONN_NOT_ALLOWED 0x04
+
+#define BNEP_FILTER_UNSUPPORTED_REQ 0x01
+#define BNEP_FILTER_INVALID_RANGE 0x02
+#define BNEP_FILTER_INVALID_MCADDR 0x02
+#define BNEP_FILTER_LIMIT_REACHED 0x03
+#define BNEP_FILTER_DENIED_SECURITY 0x04
+
+/* L2CAP settings */
+#define BNEP_MTU 1691
+#define BNEP_PSM 0x0f
+#define BNEP_FLUSH_TO 0xffff
+#define BNEP_CONNECT_TO 15
+#define BNEP_FILTER_TO 15
+
+/* Headers */
+#define BNEP_TYPE_MASK 0x7f
+#define BNEP_EXT_HEADER 0x80
struct bnep_setup_conn_req {
- __u8 type;
- __u8 ctrl;
- __u8 uuid_size;
- __u8 service[0];
+ __u8 type;
+ __u8 ctrl;
+ __u8 uuid_size;
+ __u8 service[0];
} __packed;
struct bnep_set_filter_req {
- __u8 type;
- __u8 ctrl;
+ __u8 type;
+ __u8 ctrl;
__be16 len;
- __u8 list[0];
+ __u8 list[0];
} __packed;
struct bnep_control_rsp {
- __u8 type;
- __u8 ctrl;
+ __u8 type;
+ __u8 ctrl;
__be16 resp;
} __packed;
struct bnep_ext_hdr {
- __u8 type;
- __u8 len;
- __u8 data[0];
+ __u8 type;
+ __u8 len;
+ __u8 data[0];
} __packed;
/* BNEP ioctl defines */
@@ -114,10 +113,10 @@ struct bnep_ext_hdr {
#define BNEPGETCONNINFO _IOR('B', 211, int)
struct bnep_connadd_req {
- int sock; // Connected socket
+ int sock; /* Connected socket */
__u32 flags;
__u16 role;
- char device[16]; // Name of the Ethernet device
+ char device[16]; /* Name of the Ethernet device */
};
struct bnep_conndel_req {
@@ -148,14 +147,15 @@ int bnep_del_connection(struct bnep_conndel_req *req);
int bnep_get_connlist(struct bnep_connlist_req *req);
int bnep_get_conninfo(struct bnep_conninfo *ci);
-// BNEP sessions
+/* BNEP sessions */
struct bnep_session {
struct list_head list;
unsigned int role;
unsigned long state;
unsigned long flags;
- atomic_t killed;
+ atomic_t terminate;
+ struct task_struct *task;
struct ethhdr eh;
struct msghdr msg;
@@ -173,7 +173,7 @@ void bnep_sock_cleanup(void);
static inline int bnep_mc_hash(__u8 *addr)
{
- return (crc32_be(~0, addr, ETH_ALEN) >> 26);
+ return crc32_be(~0, addr, ETH_ALEN) >> 26;
}
#endif
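
bnep_mc_hash() keeps only the top six bits of the big-endian CRC32, i.e. an index into the session's 64-bit mc_filter word, so installing and testing a multicast address are single bit operations. A sketch of both sides, mirroring the set_bit() calls in core.c (variable names are illustrative):

u64 mc_filter = 0;
u8 addr[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

set_bit(bnep_mc_hash(addr), (ulong *) &mc_filter);	/* install */

if (!test_bit(bnep_mc_hash(addr), (ulong *) &mc_filter))
	;	/* not in the filter: the frame would be dropped */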
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 5868597534e..a841d3e776c 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -26,37 +26,21 @@
*/
#include <linux/module.h>
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <linux/freezer.h>
-#include <linux/errno.h>
-#include <linux/net.h>
-#include <linux/slab.h>
-#include <net/sock.h>
-
-#include <linux/socket.h>
+#include <linux/kthread.h>
#include <linux/file.h>
-
-#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
-#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
+#include <net/bluetooth/hci_core.h>
#include "bnep.h"
#define VERSION "1.3"
-static int compress_src = 1;
-static int compress_dst = 1;
+static bool compress_src = true;
+static bool compress_dst = true;
static LIST_HEAD(bnep_session_list);
static DECLARE_RWSEM(bnep_session_sem);
@@ -64,31 +48,24 @@ static DECLARE_RWSEM(bnep_session_sem);
static struct bnep_session *__bnep_get_session(u8 *dst)
{
struct bnep_session *s;
- struct list_head *p;
BT_DBG("");
- list_for_each(p, &bnep_session_list) {
- s = list_entry(p, struct bnep_session, list);
- if (!compare_ether_addr(dst, s->eh.h_source))
+ list_for_each_entry(s, &bnep_session_list, list)
+ if (ether_addr_equal(dst, s->eh.h_source))
return s;
- }
+
return NULL;
}
static void __bnep_link_session(struct bnep_session *s)
{
- /* It's safe to call __module_get() here because sessions are added
- by the socket layer which has to hold the reference to this module.
- */
- __module_get(THIS_MODULE);
list_add(&s->list, &bnep_session_list);
}
static void __bnep_unlink_session(struct bnep_session *s)
{
list_del(&s->list);
- module_put(THIS_MODULE);
}
static int bnep_send(struct bnep_session *s, void *data, size_t len)
@@ -131,7 +108,8 @@ static int bnep_ctrl_set_netfilter(struct bnep_session *s, __be16 *data, int len
return -EILSEQ;
n = get_unaligned_be16(data);
- data++; len -= 2;
+ data++;
+ len -= 2;
if (len < n)
return -EILSEQ;
@@ -176,7 +154,8 @@ static int bnep_ctrl_set_mcfilter(struct bnep_session *s, u8 *data, int len)
return -EILSEQ;
n = get_unaligned_be16(data);
- data += 2; len -= 2;
+ data += 2;
+ len -= 2;
if (len < n)
return -EILSEQ;
@@ -187,6 +166,8 @@ static int bnep_ctrl_set_mcfilter(struct bnep_session *s, u8 *data, int len)
n /= (ETH_ALEN * 2);
if (n > 0) {
+ int i;
+
s->mc_filter = 0;
/* Always send broadcast */
@@ -196,18 +177,21 @@ static int bnep_ctrl_set_mcfilter(struct bnep_session *s, u8 *data, int len)
for (; n > 0; n--) {
u8 a1[6], *a2;
- memcpy(a1, data, ETH_ALEN); data += ETH_ALEN;
- a2 = data; data += ETH_ALEN;
-
- BT_DBG("mc filter %s -> %s",
- batostr((void *) a1), batostr((void *) a2));
+ memcpy(a1, data, ETH_ALEN);
+ data += ETH_ALEN;
+ a2 = data;
+ data += ETH_ALEN;
- #define INCA(a) { int i = 5; while (i >=0 && ++a[i--] == 0); }
+ BT_DBG("mc filter %pMR -> %pMR", a1, a2);
/* Iterate from a1 to a2 */
set_bit(bnep_mc_hash(a1), (ulong *) &s->mc_filter);
while (memcmp(a1, a2, 6) < 0 && s->mc_filter != ~0LL) {
- INCA(a1);
+ /* Increment a1 */
+ i = 5;
+ while (i >= 0 && ++a1[i--] == 0)
+ ;
+
set_bit(bnep_mc_hash(a1), (ulong *) &s->mc_filter);
}
}
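
The open-coded loop replacing the old INCA() macro increments the address as a big-endian 6-byte integer, carrying while an octet wraps to zero, so the surrounding loop visits every address in the [a1, a2] range. As a standalone sketch:

/* Increment a MAC address in place; the carry stops at the
 * first octet that does not wrap back to zero. */
static void mac_inc(u8 a[ETH_ALEN])
{
	int i = ETH_ALEN - 1;

	while (i >= 0 && ++a[i--] == 0)
		;
}
/* e.g. 01:00:5e:00:00:ff  ->  01:00:5e:00:01:00 */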
@@ -227,7 +211,8 @@ static int bnep_rx_control(struct bnep_session *s, void *data, int len)
u8 cmd = *(u8 *)data;
int err = 0;
- data++; len--;
+ data++;
+ len--;
switch (cmd) {
case BNEP_CMD_NOT_UNDERSTOOD:
@@ -302,9 +287,8 @@ static u8 __bnep_rx_hlen[] = {
ETH_ALEN + 2, /* BNEP_COMPRESSED_SRC_ONLY */
ETH_ALEN + 2 /* BNEP_COMPRESSED_DST_ONLY */
};
-#define BNEP_RX_TYPES (sizeof(__bnep_rx_hlen) - 1)
-static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
+static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
{
struct net_device *dev = s->dev;
struct sk_buff *nskb;
@@ -312,9 +296,10 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
dev->stats.rx_bytes += skb->len;
- type = *(u8 *) skb->data; skb_pull(skb, 1);
+ type = *(u8 *) skb->data;
+ skb_pull(skb, 1);
- if ((type & BNEP_TYPE_MASK) > BNEP_RX_TYPES)
+ if ((type & BNEP_TYPE_MASK) >= sizeof(__bnep_rx_hlen))
goto badframe;
if ((type & BNEP_TYPE_MASK) == BNEP_CONTROL) {
@@ -337,7 +322,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
}
/* Strip 802.1p header */
- if (ntohs(s->eh.h_proto) == 0x8100) {
+ if (ntohs(s->eh.h_proto) == ETH_P_8021Q) {
if (!skb_pull(skb, 4))
goto badframe;
s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2));
@@ -367,14 +352,14 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
case BNEP_COMPRESSED_DST_ONLY:
memcpy(__skb_put(nskb, ETH_ALEN), skb_mac_header(skb),
- ETH_ALEN);
+ ETH_ALEN);
memcpy(__skb_put(nskb, ETH_ALEN + 2), s->eh.h_source,
- ETH_ALEN + 2);
+ ETH_ALEN + 2);
break;
case BNEP_GENERAL:
memcpy(__skb_put(nskb, ETH_ALEN * 2), skb_mac_header(skb),
- ETH_ALEN * 2);
+ ETH_ALEN * 2);
put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2));
break;
}
@@ -401,7 +386,7 @@ static u8 __bnep_tx_types[] = {
BNEP_COMPRESSED
};
-static inline int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb)
+static int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb)
{
struct ethhdr *eh = (void *) skb->data;
struct socket *sock = s->sock;
@@ -419,10 +404,10 @@ static inline int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb)
iv[il++] = (struct kvec) { &type, 1 };
len++;
- if (compress_src && !compare_ether_addr(eh->h_dest, s->eh.h_source))
+ if (compress_src && ether_addr_equal(eh->h_dest, s->eh.h_source))
type |= 0x01;
- if (compress_dst && !compare_ether_addr(eh->h_source, s->eh.h_dest))
+ if (compress_dst && ether_addr_equal(eh->h_source, s->eh.h_dest))
type |= 0x02;
if (type)
@@ -470,24 +455,28 @@ static int bnep_session(void *arg)
BT_DBG("");
- daemonize("kbnepd %s", dev->name);
set_user_nice(current, -15);
init_waitqueue_entry(&wait, current);
add_wait_queue(sk_sleep(sk), &wait);
- while (!atomic_read(&s->killed)) {
+ while (1) {
set_current_state(TASK_INTERRUPTIBLE);
- // RX
+ if (atomic_read(&s->terminate))
+ break;
+ /* RX */
while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
skb_orphan(skb);
- bnep_rx_frame(s, skb);
+ if (!skb_linearize(skb))
+ bnep_rx_frame(s, skb);
+ else
+ kfree_skb(skb);
}
if (sk->sk_state != BT_CONNECTED)
break;
- // TX
+ /* TX */
while ((skb = skb_dequeue(&sk->sk_write_queue)))
if (bnep_tx_frame(s, skb))
break;
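
The rewritten loop reads the terminate flag only after set_current_state(TASK_INTERRUPTIBLE), which pairs with the wake_up_process() in bnep_del_connection(). Sketching the lost-wakeup race this ordering avoids:

/*
 *   session thread                      bnep_del_connection()
 *   --------------                      ---------------------
 *   set_current_state(INTERRUPTIBLE)
 *                                       atomic_inc(&s->terminate);
 *                                       wake_up_process(s->task);
 *   atomic_read(&s->terminate) == 1,
 *   break (or schedule() returns at
 *   once, state is already RUNNING)
 *
 * Reading the flag before changing task state would leave a window
 * in which the wakeup hits a still-RUNNING thread and the following
 * schedule() sleeps with nobody left to wake it.
 */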
@@ -495,7 +484,7 @@ static int bnep_session(void *arg)
schedule();
}
- set_current_state(TASK_RUNNING);
+ __set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
/* Cleanup session */
@@ -516,25 +505,19 @@ static int bnep_session(void *arg)
up_write(&bnep_session_sem);
free_netdev(dev);
+ module_put_and_exit(0);
return 0;
}
static struct device *bnep_get_device(struct bnep_session *session)
{
- bdaddr_t *src = &bt_sk(session->sock->sk)->src;
- bdaddr_t *dst = &bt_sk(session->sock->sk)->dst;
- struct hci_dev *hdev;
struct hci_conn *conn;
- hdev = hci_get_route(dst, src);
- if (!hdev)
+ conn = l2cap_pi(session->sock->sk)->chan->conn->hcon;
+ if (!conn)
return NULL;
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
-
- hci_dev_put(hdev);
-
- return conn ? &conn->dev : NULL;
+ return &conn->dev;
}
static struct device_type bnep_type = {
@@ -550,13 +533,13 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
BT_DBG("");
- baswap((void *) dst, &bt_sk(sock->sk)->dst);
- baswap((void *) src, &bt_sk(sock->sk)->src);
+ baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst);
+ baswap((void *) src, &l2cap_pi(sock->sk)->chan->src);
/* session struct allocated as private part of net_device */
dev = alloc_netdev(sizeof(struct bnep_session),
- (*req->device) ? req->device : "bnep%d",
- bnep_net_setup);
+ (*req->device) ? req->device : "bnep%d",
+ bnep_net_setup);
if (!dev)
return -ENOMEM;
@@ -571,7 +554,7 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
s = netdev_priv(dev);
/* This is rx header therefore addresses are swapped.
- * ie eh.h_dest is our local address. */
+ * i.e. eh.h_dest is our local address. */
memcpy(s->eh.h_dest, &src, ETH_ALEN);
memcpy(s->eh.h_source, &dst, ETH_ALEN);
memcpy(dev->dev_addr, s->eh.h_dest, ETH_ALEN);
@@ -597,17 +580,19 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
SET_NETDEV_DEVTYPE(dev, &bnep_type);
err = register_netdev(dev);
- if (err) {
+ if (err)
goto failed;
- }
__bnep_link_session(s);
- err = kernel_thread(bnep_session, s, CLONE_KERNEL);
- if (err < 0) {
+ __module_get(THIS_MODULE);
+ s->task = kthread_run(bnep_session, s, "kbnepd %s", dev->name);
+ if (IS_ERR(s->task)) {
/* Session thread start failed, gotta cleanup. */
+ module_put(THIS_MODULE);
unregister_netdev(dev);
__bnep_unlink_session(s);
+ err = PTR_ERR(s->task);
goto failed;
}
@@ -632,13 +617,8 @@ int bnep_del_connection(struct bnep_conndel_req *req)
s = __bnep_get_session(req->dst);
if (s) {
- /* Wakeup user-space which is polling for socket errors.
- * This is temporary hack until we have shutdown in L2CAP */
- s->sock->sk->sk_err = EUNATCH;
-
- /* Kill session thread */
- atomic_inc(&s->killed);
- wake_up_interruptible(sk_sleep(s->sock->sk));
+ atomic_inc(&s->terminate);
+ wake_up_process(s->task);
} else
err = -ENOENT;
@@ -658,17 +638,14 @@ static void __bnep_copy_ci(struct bnep_conninfo *ci, struct bnep_session *s)
int bnep_get_connlist(struct bnep_connlist_req *req)
{
- struct list_head *p;
+ struct bnep_session *s;
int err = 0, n = 0;
down_read(&bnep_session_sem);
- list_for_each(p, &bnep_session_list) {
- struct bnep_session *s;
+ list_for_each_entry(s, &bnep_session_list, list) {
struct bnep_conninfo ci;
- s = list_entry(p, struct bnep_session, list);
-
__bnep_copy_ci(&ci, s);
if (copy_to_user(req->ci, &ci, sizeof(ci))) {
@@ -708,8 +685,6 @@ static int __init bnep_init(void)
{
char flt[50] = "";
- l2cap_load();
-
#ifdef CONFIG_BT_BNEP_PROTO_FILTER
strcat(flt, "protocol ");
#endif
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 8c100c9dae2..4b488ec2610 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -25,16 +25,7 @@
SOFTWARE IS DISCLAIMED.
*/
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include <linux/socket.h>
-#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/wait.h>
-
-#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -128,7 +119,7 @@ static void bnep_net_timeout(struct net_device *dev)
}
#ifdef CONFIG_BT_BNEP_MC_FILTER
-static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s)
+static int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s)
{
struct ethhdr *eh = (void *) skb->data;
@@ -140,12 +131,12 @@ static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s
#ifdef CONFIG_BT_BNEP_PROTO_FILTER
/* Determine ether protocol. Based on eth_type_trans. */
-static inline u16 bnep_net_eth_proto(struct sk_buff *skb)
+static u16 bnep_net_eth_proto(struct sk_buff *skb)
{
struct ethhdr *eh = (void *) skb->data;
u16 proto = ntohs(eh->h_proto);
- if (proto >= 1536)
+ if (proto >= ETH_P_802_3_MIN)
return proto;
if (get_unaligned((__be16 *) skb->data) == htons(0xFFFF))
@@ -154,7 +145,7 @@ static inline u16 bnep_net_eth_proto(struct sk_buff *skb)
return ETH_P_802_2;
}
-static inline int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s)
+static int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s)
{
u16 proto = bnep_net_eth_proto(skb);
struct bnep_proto_filter *f = s->proto_filter;
@@ -217,7 +208,7 @@ static const struct net_device_ops bnep_netdev_ops = {
.ndo_stop = bnep_net_close,
.ndo_start_xmit = bnep_net_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_multicast_list = bnep_net_set_mc_list,
+ .ndo_set_rx_mode = bnep_net_set_mc_list,
.ndo_set_mac_address = bnep_net_set_mac_addr,
.ndo_tx_timeout = bnep_net_timeout,
.ndo_change_mtu = eth_change_mtu,
@@ -231,6 +222,7 @@ void bnep_net_setup(struct net_device *dev)
dev->addr_len = ETH_ALEN;
ether_setup(dev);
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->netdev_ops = &bnep_netdev_ops;
dev->watchdog_timeo = HZ * 2;
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 2862f53b66b..5f051290dab 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -24,28 +24,15 @@
SOFTWARE IS DISCLAIMED.
*/
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/skbuff.h>
-#include <linux/socket.h>
-#include <linux/ioctl.h>
+#include <linux/export.h>
#include <linux/file.h>
-#include <linux/init.h>
-#include <linux/compat.h>
-#include <linux/gfp.h>
-#include <net/sock.h>
-
-#include <asm/system.h>
-#include <asm/uaccess.h>
#include "bnep.h"
+static struct bt_sock_list bnep_sk_list = {
+ .lock = __RW_LOCK_UNLOCKED(bnep_sk_list.lock)
+};
+
static int bnep_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
@@ -55,6 +42,8 @@ static int bnep_sock_release(struct socket *sock)
if (!sk)
return 0;
+ bt_sock_unlink(&bnep_sk_list, sk);
+
sock_orphan(sk);
sock_put(sk);
return 0;
@@ -75,7 +64,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
switch (cmd) {
case BNEPCONNADD:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
if (copy_from_user(&ca, argp, sizeof(ca)))
return -EFAULT;
@@ -88,6 +77,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
sockfd_put(nsock);
return -EBADFD;
}
+ ca.device[sizeof(ca.device)-1] = 0;
err = bnep_add_connection(&ca, nsock);
if (!err) {
@@ -100,7 +90,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
case BNEPCONNDEL:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
if (copy_from_user(&cd, argp, sizeof(cd)))
return -EFAULT;
@@ -142,10 +132,10 @@ static int bnep_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne
{
if (cmd == BNEPGETCONNLIST) {
struct bnep_connlist_req cl;
- uint32_t uci;
+ u32 uci;
int err;
- if (get_user(cl.cnum, (uint32_t __user *) arg) ||
+ if (get_user(cl.cnum, (u32 __user *) arg) ||
get_user(uci, (u32 __user *) (arg + 4)))
return -EFAULT;
@@ -156,7 +146,7 @@ static int bnep_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne
err = bnep_get_connlist(&cl);
- if (!err && put_user(cl.cnum, (uint32_t __user *) arg))
+ if (!err && put_user(cl.cnum, (u32 __user *) arg))
err = -EFAULT;
return err;
@@ -220,6 +210,7 @@ static int bnep_sock_create(struct net *net, struct socket *sock, int protocol,
sk->sk_protocol = protocol;
sk->sk_state = BT_OPEN;
+ bt_sock_link(&bnep_sk_list, sk);
return 0;
}
@@ -238,21 +229,30 @@ int __init bnep_sock_init(void)
return err;
err = bt_sock_register(BTPROTO_BNEP, &bnep_sock_family_ops);
- if (err < 0)
+ if (err < 0) {
+ BT_ERR("Can't register BNEP socket");
goto error;
+ }
+
+ err = bt_procfs_init(&init_net, "bnep", &bnep_sk_list, NULL);
+ if (err < 0) {
+ BT_ERR("Failed to create BNEP proc file");
+ bt_sock_unregister(BTPROTO_BNEP);
+ goto error;
+ }
+
+ BT_INFO("BNEP socket layer initialized");
return 0;
error:
- BT_ERR("Can't register BNEP socket");
proto_unregister(&bnep_proto);
return err;
}
void __exit bnep_sock_cleanup(void)
{
- if (bt_sock_unregister(BTPROTO_BNEP) < 0)
- BT_ERR("Can't unregister BNEP socket");
-
+ bt_procfs_cleanup(&init_net, "bnep");
+ bt_sock_unregister(BTPROTO_BNEP);
proto_unregister(&bnep_proto);
}
diff --git a/net/bluetooth/cmtp/Kconfig b/net/bluetooth/cmtp/Kconfig
index d6b0382f6f3..94cbf42ce15 100644
--- a/net/bluetooth/cmtp/Kconfig
+++ b/net/bluetooth/cmtp/Kconfig
@@ -1,6 +1,6 @@
config BT_CMTP
tristate "CMTP protocol support"
- depends on BT && BT_L2CAP && ISDN_CAPI
+ depends on BT && ISDN_CAPI
help
CMTP (CAPI Message Transport Protocol) is a transport layer
for CAPI messages. CMTP is required for the Bluetooth Common
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 3487cfe74ae..cd75e4d64b9 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -20,7 +20,7 @@
SOFTWARE IS DISCLAIMED.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/types.h>
@@ -35,6 +35,7 @@
#include <linux/ioctl.h>
#include <linux/file.h>
#include <linux/wait.h>
+#include <linux/kthread.h>
#include <net/sock.h>
#include <linux/isdn/capilli.h>
@@ -143,7 +144,7 @@ static void cmtp_send_capimsg(struct cmtp_session *session, struct sk_buff *skb)
skb_queue_tail(&session->transmit, skb);
- cmtp_schedule(session);
+ wake_up_interruptible(sk_sleep(session->sock->sk));
}
static void cmtp_send_interopmsg(struct cmtp_session *session,
@@ -155,7 +156,8 @@ static void cmtp_send_interopmsg(struct cmtp_session *session,
BT_DBG("session %p subcmd 0x%02x appl %d msgnum %d", session, subcmd, appl, msgnum);
- if (!(skb = alloc_skb(CAPI_MSG_BASELEN + 6 + len, GFP_ATOMIC))) {
+ skb = alloc_skb(CAPI_MSG_BASELEN + 6 + len, GFP_ATOMIC);
+ if (!skb) {
BT_ERR("Can't allocate memory for interoperability packet");
return;
}
@@ -324,7 +326,7 @@ void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb)
{
struct capi_ctr *ctrl = &session->ctrl;
struct cmtp_application *application;
- __u16 cmd, appl;
+ __u16 appl;
__u32 contr;
BT_DBG("session %p skb %p len %d", session, skb, skb->len);
@@ -342,7 +344,6 @@ void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb)
return;
}
- cmd = CAPICMD(CAPIMSG_COMMAND(skb->data), CAPIMSG_SUBCOMMAND(skb->data));
appl = CAPIMSG_APPID(skb->data);
contr = CAPIMSG_CONTROL(skb->data);
@@ -386,7 +387,7 @@ static void cmtp_reset_ctr(struct capi_ctr *ctrl)
capi_ctr_down(ctrl);
atomic_inc(&session->terminate);
- cmtp_schedule(session);
+ wake_up_process(session->task);
}
static void cmtp_register_appl(struct capi_ctr *ctrl, __u16 appl, capi_register_params *rp)
@@ -538,7 +539,7 @@ static int cmtp_proc_show(struct seq_file *m, void *v)
static int cmtp_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, cmtp_proc_show, PDE(inode)->data);
+ return single_open(file, cmtp_proc_show, PDE_DATA(inode));
}
static const struct file_operations cmtp_proc_fops = {
diff --git a/net/bluetooth/cmtp/cmtp.h b/net/bluetooth/cmtp/cmtp.h
index 785e79e953c..c32638dddbf 100644
--- a/net/bluetooth/cmtp/cmtp.h
+++ b/net/bluetooth/cmtp/cmtp.h
@@ -37,7 +37,7 @@
#define CMTP_LOOPBACK 0
struct cmtp_connadd_req {
- int sock; // Connected socket
+ int sock; /* Connected socket */
__u32 flags;
};
@@ -82,6 +82,7 @@ struct cmtp_session {
char name[BTNAMSIZ];
atomic_t terminate;
+ struct task_struct *task;
wait_queue_head_t wait;
@@ -121,13 +122,6 @@ void cmtp_detach_device(struct cmtp_session *session);
void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb);
-static inline void cmtp_schedule(struct cmtp_session *session)
-{
- struct sock *sk = session->sock->sk;
-
- wake_up_interruptible(sk_sleep(sk));
-}
-
/* CMTP init defines */
int cmtp_init_sockets(void);
void cmtp_cleanup_sockets(void);
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 8e5f292529a..67fe5e84e68 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -35,6 +35,7 @@
#include <linux/ioctl.h>
#include <linux/file.h>
#include <linux/init.h>
+#include <linux/kthread.h>
#include <net/sock.h>
#include <linux/isdn/capilli.h>
@@ -52,28 +53,24 @@ static LIST_HEAD(cmtp_session_list);
static struct cmtp_session *__cmtp_get_session(bdaddr_t *bdaddr)
{
struct cmtp_session *session;
- struct list_head *p;
BT_DBG("");
- list_for_each(p, &cmtp_session_list) {
- session = list_entry(p, struct cmtp_session, list);
+ list_for_each_entry(session, &cmtp_session_list, list)
if (!bacmp(bdaddr, &session->bdaddr))
return session;
- }
+
return NULL;
}
static void __cmtp_link_session(struct cmtp_session *session)
{
- __module_get(THIS_MODULE);
list_add(&session->list, &cmtp_session_list);
}
static void __cmtp_unlink_session(struct cmtp_session *session)
{
list_del(&session->list);
- module_put(THIS_MODULE);
}
static void __cmtp_copy_session(struct cmtp_session *session, struct cmtp_conninfo *ci)
@@ -115,7 +112,8 @@ static inline void cmtp_add_msgpart(struct cmtp_session *session, int id, const
size = (skb) ? skb->len + count : count;
- if (!(nskb = alloc_skb(size, GFP_ATOMIC))) {
+ nskb = alloc_skb(size, GFP_ATOMIC);
+ if (!nskb) {
BT_ERR("Can't allocate memory for CAPI message");
return;
}
@@ -216,7 +214,8 @@ static void cmtp_process_transmit(struct cmtp_session *session)
BT_DBG("session %p", session);
- if (!(nskb = alloc_skb(session->mtu, GFP_ATOMIC))) {
+ nskb = alloc_skb(session->mtu, GFP_ATOMIC);
+ if (!nskb) {
BT_ERR("Can't allocate memory for new frame");
return;
}
@@ -224,7 +223,8 @@ static void cmtp_process_transmit(struct cmtp_session *session)
while ((skb = skb_dequeue(&session->transmit))) {
struct cmtp_scb *scb = (void *) skb->cb;
- if ((tail = (session->mtu - nskb->len)) < 5) {
+ tail = session->mtu - nskb->len;
+ if (tail < 5) {
cmtp_send_frame(session, nskb->data, nskb->len);
skb_trim(nskb, 0);
tail = session->mtu;
@@ -232,9 +232,12 @@ static void cmtp_process_transmit(struct cmtp_session *session)
size = min_t(uint, ((tail < 258) ? (tail - 2) : (tail - 3)), skb->len);
- if ((scb->id < 0) && ((scb->id = cmtp_alloc_block_id(session)) < 0)) {
- skb_queue_head(&session->transmit, skb);
- break;
+ if (scb->id < 0) {
+ scb->id = cmtp_alloc_block_id(session);
+ if (scb->id < 0) {
+ skb_queue_head(&session->transmit, skb);
+ break;
+ }
}
if (size < 256) {
@@ -281,27 +284,31 @@ static int cmtp_session(void *arg)
BT_DBG("session %p", session);
- daemonize("kcmtpd_ctr_%d", session->num);
set_user_nice(current, -15);
init_waitqueue_entry(&wait, current);
add_wait_queue(sk_sleep(sk), &wait);
- while (!atomic_read(&session->terminate)) {
+ while (1) {
set_current_state(TASK_INTERRUPTIBLE);
+ if (atomic_read(&session->terminate))
+ break;
if (sk->sk_state != BT_CONNECTED)
break;
while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
skb_orphan(skb);
- cmtp_recv_frame(session, skb);
+ if (!skb_linearize(skb))
+ cmtp_recv_frame(session, skb);
+ else
+ kfree_skb(skb);
}
cmtp_process_transmit(session);
schedule();
}
- set_current_state(TASK_RUNNING);
+ __set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
down_write(&cmtp_session_sem);
@@ -316,6 +323,7 @@ static int cmtp_session(void *arg)
up_write(&cmtp_session_sem);
kfree(session);
+ module_put_and_exit(0);
return 0;
}
@@ -332,19 +340,20 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
down_write(&cmtp_session_sem);
- s = __cmtp_get_session(&bt_sk(sock->sk)->dst);
+ s = __cmtp_get_session(&l2cap_pi(sock->sk)->chan->dst);
if (s && s->state == BT_CONNECTED) {
err = -EEXIST;
goto failed;
}
- bacpy(&session->bdaddr, &bt_sk(sock->sk)->dst);
+ bacpy(&session->bdaddr, &l2cap_pi(sock->sk)->chan->dst);
- session->mtu = min_t(uint, l2cap_pi(sock->sk)->omtu, l2cap_pi(sock->sk)->imtu);
+ session->mtu = min_t(uint, l2cap_pi(sock->sk)->chan->omtu,
+ l2cap_pi(sock->sk)->chan->imtu);
BT_DBG("mtu %d", session->mtu);
- sprintf(session->name, "%s", batostr(&bt_sk(sock->sk)->dst));
+ sprintf(session->name, "%pMR", &session->bdaddr);
session->sock = sock;
session->state = BT_CONFIG;
@@ -364,22 +373,28 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
__cmtp_link_session(session);
- err = kernel_thread(cmtp_session, session, CLONE_KERNEL);
- if (err < 0)
+ __module_get(THIS_MODULE);
+ session->task = kthread_run(cmtp_session, session, "kcmtpd_ctr_%d",
+ session->num);
+ if (IS_ERR(session->task)) {
+ module_put(THIS_MODULE);
+ err = PTR_ERR(session->task);
goto unlink;
+ }
if (!(session->flags & (1 << CMTP_LOOPBACK))) {
err = cmtp_attach_device(session);
- if (err < 0)
- goto detach;
+ if (err < 0) {
+ atomic_inc(&session->terminate);
+ wake_up_process(session->task);
+ up_write(&cmtp_session_sem);
+ return err;
+ }
}
up_write(&cmtp_session_sem);
return 0;
-detach:
- cmtp_detach_device(session);
-
unlink:
__cmtp_unlink_session(session);
@@ -403,9 +418,9 @@ int cmtp_del_connection(struct cmtp_conndel_req *req)
/* Flush the transmit queue */
skb_queue_purge(&session->transmit);
- /* Kill session thread */
+ /* Stop session thread */
atomic_inc(&session->terminate);
- cmtp_schedule(session);
+ wake_up_process(session->task);
} else
err = -ENOENT;
@@ -415,19 +430,16 @@ int cmtp_del_connection(struct cmtp_conndel_req *req)
int cmtp_get_connlist(struct cmtp_connlist_req *req)
{
- struct list_head *p;
+ struct cmtp_session *session;
int err = 0, n = 0;
BT_DBG("");
down_read(&cmtp_session_sem);
- list_for_each(p, &cmtp_session_list) {
- struct cmtp_session *session;
+ list_for_each_entry(session, &cmtp_session_list, list) {
struct cmtp_conninfo ci;
- session = list_entry(p, struct cmtp_session, list);
-
__cmtp_copy_session(session, &ci);
if (copy_to_user(req->ci, &ci, sizeof(ci))) {
@@ -466,8 +478,6 @@ int cmtp_get_conninfo(struct cmtp_conninfo *ci)
static int __init cmtp_init(void)
{
- l2cap_load();
-
BT_INFO("CMTP (CAPI Emulation) ver %s", VERSION);
cmtp_init_sockets();
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index 7ea1979a8e4..d82787d417b 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -20,7 +20,7 @@
SOFTWARE IS DISCLAIMED.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/types.h>
#include <linux/capability.h>
@@ -34,15 +34,18 @@
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/gfp.h>
+#include <linux/uaccess.h>
#include <net/sock.h>
#include <linux/isdn/capilli.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
#include "cmtp.h"
+static struct bt_sock_list cmtp_sk_list = {
+ .lock = __RW_LOCK_UNLOCKED(cmtp_sk_list.lock)
+};
+
static int cmtp_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
@@ -52,6 +55,8 @@ static int cmtp_sock_release(struct socket *sock)
if (!sk)
return 0;
+ bt_sock_unlink(&cmtp_sk_list, sk);
+
sock_orphan(sk);
sock_put(sk);
@@ -73,7 +78,7 @@ static int cmtp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
switch (cmd) {
case CMTPCONNADD:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
if (copy_from_user(&ca, argp, sizeof(ca)))
return -EFAULT;
@@ -98,7 +103,7 @@ static int cmtp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
case CMTPCONNDEL:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
if (copy_from_user(&cd, argp, sizeof(cd)))
return -EFAULT;
@@ -137,10 +142,10 @@ static int cmtp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne
{
if (cmd == CMTPGETCONNLIST) {
struct cmtp_connlist_req cl;
- uint32_t uci;
+ u32 uci;
int err;
- if (get_user(cl.cnum, (uint32_t __user *) arg) ||
+ if (get_user(cl.cnum, (u32 __user *) arg) ||
get_user(uci, (u32 __user *) (arg + 4)))
return -EFAULT;
@@ -151,7 +156,7 @@ static int cmtp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne
err = cmtp_get_connlist(&cl);
- if (!err && put_user(cl.cnum, (uint32_t __user *) arg))
+ if (!err && put_user(cl.cnum, (u32 __user *) arg))
err = -EFAULT;
return err;
@@ -215,6 +220,8 @@ static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol,
sk->sk_protocol = protocol;
sk->sk_state = BT_OPEN;
+ bt_sock_link(&cmtp_sk_list, sk);
+
return 0;
}
@@ -233,21 +240,30 @@ int cmtp_init_sockets(void)
return err;
err = bt_sock_register(BTPROTO_CMTP, &cmtp_sock_family_ops);
- if (err < 0)
+ if (err < 0) {
+ BT_ERR("Can't register CMTP socket");
goto error;
+ }
+
+ err = bt_procfs_init(&init_net, "cmtp", &cmtp_sk_list, NULL);
+ if (err < 0) {
+ BT_ERR("Failed to create CMTP proc file");
+ bt_sock_unregister(BTPROTO_CMTP);
+ goto error;
+ }
+
+ BT_INFO("CMTP socket layer initialized");
return 0;
error:
- BT_ERR("Can't register CMTP socket");
proto_unregister(&cmtp_proto);
return err;
}
void cmtp_cleanup_sockets(void)
{
- if (bt_sock_unregister(BTPROTO_CMTP) < 0)
- BT_ERR("Can't unregister CMTP socket");
-
+ bt_procfs_cleanup(&init_net, "cmtp");
+ bt_sock_unregister(BTPROTO_CMTP);
proto_unregister(&cmtp_proto);
}
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 99cd8d9d891..a7a27bc2c0b 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -24,37 +24,48 @@
/* Bluetooth HCI connection handling. */
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/interrupt.h>
-#include <linux/notifier.h>
-#include <net/sock.h>
-
-#include <asm/system.h>
-#include <linux/uaccess.h>
-#include <asm/unaligned.h>
+#include <linux/export.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/l2cap.h>
+
+#include "smp.h"
+#include "a2mp.h"
+
+struct sco_param {
+ u16 pkt_type;
+ u16 max_latency;
+};
+
+static const struct sco_param sco_param_cvsd[] = {
+ { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a }, /* S3 */
+ { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007 }, /* S2 */
+ { EDR_ESCO_MASK | ESCO_EV3, 0x0007 }, /* S1 */
+ { EDR_ESCO_MASK | ESCO_HV3, 0xffff }, /* D1 */
+ { EDR_ESCO_MASK | ESCO_HV1, 0xffff }, /* D0 */
+};
+
+static const struct sco_param sco_param_wideband[] = {
+ { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d }, /* T2 */
+ { EDR_ESCO_MASK | ESCO_EV3, 0x0008 }, /* T1 */
+};
+
+static void hci_le_create_connection_cancel(struct hci_conn *conn)
+{
+ hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
+}
-void hci_acl_connect(struct hci_conn *conn)
+static void hci_acl_create_connection(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
struct inquiry_entry *ie;
struct hci_cp_create_conn cp;
- BT_DBG("%p", conn);
+ BT_DBG("hcon %p", conn);
conn->state = BT_CONNECT;
- conn->out = 1;
+ conn->out = true;
conn->link_mode = HCI_LM_MASTER;
@@ -72,11 +83,12 @@ void hci_acl_connect(struct hci_conn *conn)
cp.pscan_rep_mode = ie->data.pscan_rep_mode;
cp.pscan_mode = ie->data.pscan_mode;
cp.clock_offset = ie->data.clock_offset |
- cpu_to_le16(0x8000);
+ cpu_to_le16(0x8000);
}
memcpy(conn->dev_class, ie->data.dev_class, 3);
- conn->ssp_mode = ie->data.ssp_mode;
+ if (ie->data.ssp_mode > 0)
+ set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
}
cp.pkt_type = cpu_to_le16(conn->pkt_type);
@@ -88,24 +100,34 @@ void hci_acl_connect(struct hci_conn *conn)
hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}
-static void hci_acl_connect_cancel(struct hci_conn *conn)
+static void hci_acl_create_connection_cancel(struct hci_conn *conn)
{
struct hci_cp_create_conn_cancel cp;
- BT_DBG("%p", conn);
+ BT_DBG("hcon %p", conn);
- if (conn->hdev->hci_ver < 2)
+ if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
return;
bacpy(&cp.bdaddr, &conn->dst);
hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}
-void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
+static void hci_reject_sco(struct hci_conn *conn)
+{
+ struct hci_cp_reject_sync_conn_req cp;
+
+ cp.reason = HCI_ERROR_REMOTE_USER_TERM;
+ bacpy(&cp.bdaddr, &conn->dst);
+
+ hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
+}
+
+void hci_disconnect(struct hci_conn *conn, __u8 reason)
{
struct hci_cp_disconnect cp;
- BT_DBG("%p", conn);
+ BT_DBG("hcon %p", conn);
conn->state = BT_DISCONN;
@@ -114,15 +136,29 @@ void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}
-void hci_add_sco(struct hci_conn *conn, __u16 handle)
+static void hci_amp_disconn(struct hci_conn *conn, __u8 reason)
+{
+ struct hci_cp_disconn_phy_link cp;
+
+ BT_DBG("hcon %p", conn);
+
+ conn->state = BT_DISCONN;
+
+ cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
+ cp.reason = reason;
+ hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
+ sizeof(cp), &cp);
+}
+
+static void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
struct hci_dev *hdev = conn->hdev;
struct hci_cp_add_sco cp;
- BT_DBG("%p", conn);
+ BT_DBG("hcon %p", conn);
conn->state = BT_CONNECT;
- conn->out = 1;
+ conn->out = true;
conn->attempt++;
@@ -132,28 +168,86 @@ void hci_add_sco(struct hci_conn *conn, __u16 handle)
hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}
-void hci_setup_sync(struct hci_conn *conn, __u16 handle)
+bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
struct hci_dev *hdev = conn->hdev;
struct hci_cp_setup_sync_conn cp;
+ const struct sco_param *param;
- BT_DBG("%p", conn);
+ BT_DBG("hcon %p", conn);
conn->state = BT_CONNECT;
- conn->out = 1;
+ conn->out = true;
conn->attempt++;
cp.handle = cpu_to_le16(handle);
- cp.pkt_type = cpu_to_le16(conn->pkt_type);
cp.tx_bandwidth = cpu_to_le32(0x00001f40);
cp.rx_bandwidth = cpu_to_le32(0x00001f40);
- cp.max_latency = cpu_to_le16(0xffff);
- cp.voice_setting = cpu_to_le16(hdev->voice_setting);
- cp.retrans_effort = 0xff;
+ cp.voice_setting = cpu_to_le16(conn->setting);
+
+ switch (conn->setting & SCO_AIRMODE_MASK) {
+ case SCO_AIRMODE_TRANSP:
+ if (conn->attempt > ARRAY_SIZE(sco_param_wideband))
+ return false;
+ cp.retrans_effort = 0x02;
+ param = &sco_param_wideband[conn->attempt - 1];
+ break;
+ case SCO_AIRMODE_CVSD:
+ if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
+ return false;
+ cp.retrans_effort = 0x01;
+ param = &sco_param_cvsd[conn->attempt - 1];
+ break;
+ default:
+ return false;
+ }
+
+ cp.pkt_type = __cpu_to_le16(param->pkt_type);
+ cp.max_latency = __cpu_to_le16(param->max_latency);
+
+ if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
+ return false;
- hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
+ return true;
+}
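conn->attempt indexes the parameter tables above, so each retry of a failed eSCO setup steps down to a less demanding configuration (T2 then T1 for transparent audio, S3 down through D0 for CVSD) until the table runs out and hci_setup_sync() returns false. A standalone illustration of that lookup; the pkt_type values here are placeholders, not the real packet-type masks:

#include <stdio.h>
#include <stddef.h>

struct sco_param {
	unsigned short pkt_type;
	unsigned short max_latency;
};

/* Two-entry table standing in for sco_param_wideband above. */
static const struct sco_param tbl[] = {
	{ 0x0001, 0x000d },	/* T2: preferred */
	{ 0x0002, 0x0008 },	/* T1: fallback */
};

#define TBL_LEN (sizeof(tbl) / sizeof(tbl[0]))

/* attempt counts from 1; past the end of the table the caller gives
 * up, mirroring the "return false" paths in hci_setup_sync(). */
static const struct sco_param *param_for_attempt(unsigned int attempt)
{
	if (attempt < 1 || attempt > TBL_LEN)
		return NULL;
	return &tbl[attempt - 1];
}

int main(void)
{
	unsigned int attempt;

	for (attempt = 1; attempt <= 3; attempt++) {
		const struct sco_param *p = param_for_attempt(attempt);

		if (p)
			printf("attempt %u: max_latency 0x%04x\n",
			       attempt, p->max_latency);
		else
			printf("attempt %u: give up\n", attempt);
	}
	return 0;
}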
+
+void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
+ u16 latency, u16 to_multiplier)
+{
+ struct hci_cp_le_conn_update cp;
+ struct hci_dev *hdev = conn->hdev;
+
+ memset(&cp, 0, sizeof(cp));
+
+ cp.handle = cpu_to_le16(conn->handle);
+ cp.conn_interval_min = cpu_to_le16(min);
+ cp.conn_interval_max = cpu_to_le16(max);
+ cp.conn_latency = cpu_to_le16(latency);
+ cp.supervision_timeout = cpu_to_le16(to_multiplier);
+ cp.min_ce_len = cpu_to_le16(0x0000);
+ cp.max_ce_len = cpu_to_le16(0x0000);
+
+ hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
+}
+
+void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
+ __u8 ltk[16])
+{
+ struct hci_dev *hdev = conn->hdev;
+ struct hci_cp_le_start_enc cp;
+
+ BT_DBG("hcon %p", conn);
+
+ memset(&cp, 0, sizeof(cp));
+
+ cp.handle = cpu_to_le16(conn->handle);
+ cp.rand = rand;
+ cp.ediv = ediv;
+ memcpy(cp.ltk, ltk, sizeof(cp.ltk));
+
+ hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}
/* Device _must_ be locked */
@@ -161,11 +255,11 @@ void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
struct hci_conn *sco = conn->link;
- BT_DBG("%p", conn);
-
if (!sco)
return;
+ BT_DBG("hcon %p", conn);
+
if (!status) {
if (lmp_esco_capable(conn->hdev))
hci_setup_sync(sco, conn->handle);
@@ -177,71 +271,167 @@ void hci_sco_setup(struct hci_conn *conn, __u8 status)
}
}
-static void hci_conn_timeout(unsigned long arg)
+static void hci_conn_disconnect(struct hci_conn *conn)
{
- struct hci_conn *conn = (void *) arg;
- struct hci_dev *hdev = conn->hdev;
- __u8 reason;
+ __u8 reason = hci_proto_disconn_ind(conn);
- BT_DBG("conn %p state %d", conn, conn->state);
+ switch (conn->type) {
+ case AMP_LINK:
+ hci_amp_disconn(conn, reason);
+ break;
+ default:
+ hci_disconnect(conn, reason);
+ break;
+ }
+}
- if (atomic_read(&conn->refcnt))
+static void hci_conn_timeout(struct work_struct *work)
+{
+ struct hci_conn *conn = container_of(work, struct hci_conn,
+ disc_work.work);
+ int refcnt = atomic_read(&conn->refcnt);
+
+ BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
+
+ WARN_ON(refcnt < 0);
+
+ /* FIXME: It was observed that in a failed-pairing scenario the
+ * refcnt drops below 0. This is probably because l2cap_conn_del
+ * calls l2cap_chan_del for each channel, and inside l2cap_chan_del
+ * the conn reference is dropped. After that loop hci_chan_del is
+ * called, which also drops conn. For now, make sure the ACL stays
+ * alive if refcnt is higher than 0; otherwise drop it.
+ */
+ if (refcnt > 0)
return;
- hci_dev_lock(hdev);
-
switch (conn->state) {
case BT_CONNECT:
case BT_CONNECT2:
- if (conn->type == ACL_LINK && conn->out)
- hci_acl_connect_cancel(conn);
+ if (conn->out) {
+ if (conn->type == ACL_LINK)
+ hci_acl_create_connection_cancel(conn);
+ else if (conn->type == LE_LINK)
+ hci_le_create_connection_cancel(conn);
+ } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
+ hci_reject_sco(conn);
+ }
break;
case BT_CONFIG:
case BT_CONNECTED:
- reason = hci_proto_disconn_ind(conn);
- hci_acl_disconn(conn, reason);
+ hci_conn_disconnect(conn);
break;
default:
conn->state = BT_CLOSED;
break;
}
+}
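The disconnect timeout now runs as delayed work instead of a timer callback, which moves the handler into process context where it may sleep. The conversion pattern, sketched with a hypothetical my_conn structure:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_conn {
	struct delayed_work disc_work;
	/* ... */
};

static void my_conn_timeout(struct work_struct *work)
{
	/* container_of() recovers the owning object; note the extra
	 * ".work" member because delayed_work embeds a work_struct. */
	struct my_conn *conn = container_of(work, struct my_conn,
					    disc_work.work);

	/* process context: sleeping is allowed here, unlike in the
	 * old timer callback */
	(void)conn;
}

static void my_conn_init(struct my_conn *conn)
{
	/* replaces setup_timer() */
	INIT_DELAYED_WORK(&conn->disc_work, my_conn_timeout);
}

static void my_conn_arm(struct my_conn *conn, unsigned int msecs)
{
	/* replaces mod_timer(); note that schedule_delayed_work() does
	 * not re-queue an already-pending item, so use
	 * mod_delayed_work() when the deadline must be pushed back */
	schedule_delayed_work(&conn->disc_work, msecs_to_jiffies(msecs));
}

static void my_conn_teardown(struct my_conn *conn)
{
	/* also waits for a concurrently running handler, which the old
	 * del_timer() did not */
	cancel_delayed_work_sync(&conn->disc_work);
}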
- hci_dev_unlock(hdev);
+/* Enter sniff mode */
+static void hci_conn_idle(struct work_struct *work)
+{
+ struct hci_conn *conn = container_of(work, struct hci_conn,
+ idle_work.work);
+ struct hci_dev *hdev = conn->hdev;
+
+ BT_DBG("hcon %p mode %d", conn, conn->mode);
+
+ if (test_bit(HCI_RAW, &hdev->flags))
+ return;
+
+ if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
+ return;
+
+ if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
+ return;
+
+ if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
+ struct hci_cp_sniff_subrate cp;
+ cp.handle = cpu_to_le16(conn->handle);
+ cp.max_latency = cpu_to_le16(0);
+ cp.min_remote_timeout = cpu_to_le16(0);
+ cp.min_local_timeout = cpu_to_le16(0);
+ hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
+ }
+
+ if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
+ struct hci_cp_sniff_mode cp;
+ cp.handle = cpu_to_le16(conn->handle);
+ cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
+ cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
+ cp.attempt = cpu_to_le16(4);
+ cp.timeout = cpu_to_le16(1);
+ hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
+ }
}
-static void hci_conn_idle(unsigned long arg)
+static void hci_conn_auto_accept(struct work_struct *work)
{
- struct hci_conn *conn = (void *) arg;
+ struct hci_conn *conn = container_of(work, struct hci_conn,
+ auto_accept_work.work);
- BT_DBG("conn %p mode %d", conn, conn->mode);
+ hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
+ &conn->dst);
+}
- hci_conn_enter_sniff_mode(conn);
+static void le_conn_timeout(struct work_struct *work)
+{
+ struct hci_conn *conn = container_of(work, struct hci_conn,
+ le_conn_timeout.work);
+ struct hci_dev *hdev = conn->hdev;
+
+ BT_DBG("");
+
+ /* We could end up here due to having done directed advertising,
+ * so clean up the state if necessary. This should however only
+ * happen with broken hardware or if low duty cycle was used
+ * (which doesn't have a timeout of its own).
+ */
+ if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
+ u8 enable = 0x00;
+ hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
+ &enable);
+ hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
+ return;
+ }
+
+ hci_le_create_connection_cancel(conn);
}
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
struct hci_conn *conn;
- BT_DBG("%s dst %s", hdev->name, batostr(dst));
+ BT_DBG("%s dst %pMR", hdev->name, dst);
- conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC);
+ conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL);
if (!conn)
return NULL;
bacpy(&conn->dst, dst);
+ bacpy(&conn->src, &hdev->bdaddr);
conn->hdev = hdev;
conn->type = type;
conn->mode = HCI_CM_ACTIVE;
conn->state = BT_OPEN;
conn->auth_type = HCI_AT_GENERAL_BONDING;
+ conn->io_capability = hdev->io_capability;
+ conn->remote_auth = 0xff;
+ conn->key_type = 0xff;
+ conn->tx_power = HCI_TX_POWER_INVALID;
+ conn->max_tx_power = HCI_TX_POWER_INVALID;
- conn->power_save = 1;
+ set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
switch (type) {
case ACL_LINK:
conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
break;
+ case LE_LINK:
+ /* conn->src should reflect the local identity address */
+ hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
+ break;
case SCO_LINK:
if (lmp_esco_capable(hdev))
conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
@@ -256,25 +446,23 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
skb_queue_head_init(&conn->data_q);
- setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
- setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
+ INIT_LIST_HEAD(&conn->chan_list);
+
+ INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
+ INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
+ INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
+ INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
atomic_set(&conn->refcnt, 0);
hci_dev_hold(hdev);
- tasklet_disable(&hdev->tx_task);
-
hci_conn_hash_add(hdev, conn);
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
- atomic_set(&conn->devref, 0);
-
hci_conn_init_sysfs(conn);
- tasklet_enable(&hdev->tx_task);
-
return conn;
}
@@ -282,11 +470,11 @@ int hci_conn_del(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
- BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);
-
- del_timer(&conn->idle_timer);
+ BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
- del_timer(&conn->disc_timer);
+ cancel_delayed_work_sync(&conn->disc_work);
+ cancel_delayed_work_sync(&conn->auto_accept_work);
+ cancel_delayed_work_sync(&conn->idle_work);
if (conn->type == ACL_LINK) {
struct hci_conn *sco = conn->link;
@@ -295,45 +483,55 @@ int hci_conn_del(struct hci_conn *conn)
/* Unacked frames */
hdev->acl_cnt += conn->sent;
+ } else if (conn->type == LE_LINK) {
+ cancel_delayed_work_sync(&conn->le_conn_timeout);
+
+ if (hdev->le_pkts)
+ hdev->le_cnt += conn->sent;
+ else
+ hdev->acl_cnt += conn->sent;
} else {
struct hci_conn *acl = conn->link;
if (acl) {
acl->link = NULL;
- hci_conn_put(acl);
+ hci_conn_drop(acl);
}
}
- tasklet_disable(&hdev->tx_task);
+ hci_chan_list_flush(conn);
+
+ if (conn->amp_mgr)
+ amp_mgr_put(conn->amp_mgr);
hci_conn_hash_del(hdev, conn);
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
- tasklet_enable(&hdev->tx_task);
-
skb_queue_purge(&conn->data_q);
- hci_conn_put_device(conn);
+ hci_conn_del_sysfs(conn);
hci_dev_put(hdev);
+ hci_conn_put(conn);
+
return 0;
}
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
int use_src = bacmp(src, BDADDR_ANY);
- struct hci_dev *hdev = NULL;
- struct list_head *p;
+ struct hci_dev *hdev = NULL, *d;
- BT_DBG("%s -> %s", batostr(src), batostr(dst));
+ BT_DBG("%pMR -> %pMR", src, dst);
- read_lock_bh(&hci_dev_list_lock);
+ read_lock(&hci_dev_list_lock);
- list_for_each(p, &hci_dev_list) {
- struct hci_dev *d = list_entry(p, struct hci_dev, list);
-
- if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
+ list_for_each_entry(d, &hci_dev_list, list) {
+ if (!test_bit(HCI_UP, &d->flags) ||
+ test_bit(HCI_RAW, &d->flags) ||
+ test_bit(HCI_USER_CHANNEL, &d->dev_flags) ||
+ d->dev_type != HCI_BREDR)
continue;
/* Simple routing:
@@ -355,25 +553,245 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
if (hdev)
hdev = hci_dev_hold(hdev);
- read_unlock_bh(&hci_dev_list_lock);
+ read_unlock(&hci_dev_list_lock);
return hdev;
}
EXPORT_SYMBOL(hci_get_route);
-/* Create SCO or ACL connection.
- * Device _must_ be locked */
-struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
+/* This function requires the caller holds hdev->lock */
+void hci_le_conn_failed(struct hci_conn *conn, u8 status)
+{
+ struct hci_dev *hdev = conn->hdev;
+
+ conn->state = BT_CLOSED;
+
+ mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
+ status);
+
+ hci_proto_connect_cfm(conn, status);
+
+ hci_conn_del(conn);
+
+ /* Since we may have temporarily stopped the background scanning in
+ * favor of connection establishment, we should restart it.
+ */
+ hci_update_background_scan(hdev);
+
+ /* Re-enable advertising in case this was a failed connection
+ * attempt as a peripheral.
+ */
+ mgmt_reenable_advertising(hdev);
+}
+
+static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
+{
+ struct hci_conn *conn;
+
+ if (status == 0)
+ return;
+
+ BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
+ status);
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+ if (!conn)
+ goto done;
+
+ hci_le_conn_failed(conn, status);
+
+done:
+ hci_dev_unlock(hdev);
+}
+
+static void hci_req_add_le_create_conn(struct hci_request *req,
+ struct hci_conn *conn)
+{
+ struct hci_cp_le_create_conn cp;
+ struct hci_dev *hdev = conn->hdev;
+ u8 own_addr_type;
+
+ memset(&cp, 0, sizeof(cp));
+
+ /* Update random address, but set require_privacy to false so
+ * that we never connect with an unresolvable address.
+ */
+ if (hci_update_random_address(req, false, &own_addr_type))
+ return;
+
+ cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
+ cp.scan_window = cpu_to_le16(hdev->le_scan_window);
+ bacpy(&cp.peer_addr, &conn->dst);
+ cp.peer_addr_type = conn->dst_type;
+ cp.own_address_type = own_addr_type;
+ cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
+ cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
+ cp.supervision_timeout = cpu_to_le16(0x002a);
+ cp.min_ce_len = cpu_to_le16(0x0000);
+ cp.max_ce_len = cpu_to_le16(0x0000);
+
+ hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
+
+ conn->state = BT_CONNECT;
+}
+
+static void hci_req_directed_advertising(struct hci_request *req,
+ struct hci_conn *conn)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct hci_cp_le_set_adv_param cp;
+ u8 own_addr_type;
+ u8 enable;
+
+ enable = 0x00;
+ hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
+
+ /* Clear the HCI_ADVERTISING bit temporarily so that the
+ * hci_update_random_address knows that it's safe to go ahead
+ * and write a new random address. The flag will be set back on
+ * as soon as the SET_ADV_ENABLE HCI command completes.
+ */
+ clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+
+ /* Set require_privacy to false so that the remote device has a
+ * chance of identifying us.
+ */
+ if (hci_update_random_address(req, false, &own_addr_type) < 0)
+ return;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.type = LE_ADV_DIRECT_IND;
+ cp.own_address_type = own_addr_type;
+ cp.direct_addr_type = conn->dst_type;
+ bacpy(&cp.direct_addr, &conn->dst);
+ cp.channel_map = hdev->le_adv_channel_map;
+
+ hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
+
+ enable = 0x01;
+ hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
+
+ conn->state = BT_CONNECT;
+}
+
+struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
+ u8 dst_type, u8 sec_level, u8 auth_type)
+{
+ struct hci_conn_params *params;
+ struct hci_conn *conn;
+ struct smp_irk *irk;
+ struct hci_request req;
+ int err;
+
+ /* Some devices send ATT messages as soon as the physical link is
+ * established. To be able to handle these ATT messages, the user-
+ * space first establishes the connection and then starts the pairing
+ * process.
+ *
+ * So if a hci_conn object already exists for the following connection
+ * attempt, we simply update pending_sec_level and auth_type fields
+ * and return the object found.
+ */
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+ if (conn) {
+ conn->pending_sec_level = sec_level;
+ conn->auth_type = auth_type;
+ goto done;
+ }
+
+ /* Since the controller supports only one LE connection attempt at a
+ * time, we return -EBUSY if there is any connection attempt running.
+ */
+ conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+ if (conn)
+ return ERR_PTR(-EBUSY);
+
+ /* When given an identity address with existing identity
+ * resolving key, the connection needs to be established
+ * to a resolvable random address.
+ *
+ * This uses the cached random resolvable address from
+ * a previous scan. When no cached address is available,
+ * try connecting to the identity address instead.
+ *
+ * Storing the resolvable random address is required here
+ * to handle connection failures. The address will later
+ * be resolved back into the original identity address
+ * from the connect request.
+ */
+ irk = hci_find_irk_by_addr(hdev, dst, dst_type);
+ if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
+ dst = &irk->rpa;
+ dst_type = ADDR_LE_DEV_RANDOM;
+ }
+
+ conn = hci_conn_add(hdev, LE_LINK, dst);
+ if (!conn)
+ return ERR_PTR(-ENOMEM);
+
+ conn->dst_type = dst_type;
+ conn->sec_level = BT_SECURITY_LOW;
+ conn->pending_sec_level = sec_level;
+ conn->auth_type = auth_type;
+
+ hci_req_init(&req, hdev);
+
+ if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
+ hci_req_directed_advertising(&req, conn);
+ goto create_conn;
+ }
+
+ conn->out = true;
+ conn->link_mode |= HCI_LM_MASTER;
+
+ params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
+ if (params) {
+ conn->le_conn_min_interval = params->conn_min_interval;
+ conn->le_conn_max_interval = params->conn_max_interval;
+ } else {
+ conn->le_conn_min_interval = hdev->le_conn_min_interval;
+ conn->le_conn_max_interval = hdev->le_conn_max_interval;
+ }
+
+ /* If controller is scanning, we stop it since some controllers are
+ * not able to scan and connect at the same time. Also set the
+ * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
+ * handler for scan disabling knows to set the correct discovery
+ * state.
+ */
+ if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
+ hci_req_add_le_scan_disable(&req);
+ set_bit(HCI_LE_SCAN_INTERRUPTED, &hdev->dev_flags);
+ }
+
+ hci_req_add_le_create_conn(&req, conn);
+
+create_conn:
+ err = hci_req_run(&req, create_le_conn_complete);
+ if (err) {
+ hci_conn_del(conn);
+ return ERR_PTR(err);
+ }
+
+done:
+ hci_conn_hold(conn);
+ return conn;
+}
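hci_connect_le() batches its commands through the HCI request framework: hci_req_init() starts a transaction, each hci_req_add() queues one command, and hci_req_run() submits them all with a single completion callback (here create_le_conn_complete). A reduced sketch of the calling convention, reusing helpers that appear above (my_* names are illustrative):

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void my_conn_complete(struct hci_dev *hdev, u8 status)
{
	if (status)
		BT_ERR("LE connect request failed: 0x%2.2x", status);
}

static int my_connect(struct hci_dev *hdev, struct hci_cp_le_create_conn *cp)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add_le_scan_disable(&req);			/* command #1 */
	hci_req_add(&req, HCI_OP_LE_CREATE_CONN, sizeof(*cp), cp); /* #2 */

	/* nothing is sent until here; returns 0 on success, or
	 * -ENODATA if the request ended up empty */
	return hci_req_run(&req, my_conn_complete);
}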
+
+struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
+ u8 sec_level, u8 auth_type)
{
struct hci_conn *acl;
- struct hci_conn *sco;
- BT_DBG("%s dst %s", hdev->name, batostr(dst));
+ if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+ return ERR_PTR(-ENOTSUPP);
acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
if (!acl) {
acl = hci_conn_add(hdev, ACL_LINK, dst);
if (!acl)
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
hci_conn_hold(acl);
@@ -382,18 +800,28 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
acl->sec_level = BT_SECURITY_LOW;
acl->pending_sec_level = sec_level;
acl->auth_type = auth_type;
- hci_acl_connect(acl);
+ hci_acl_create_connection(acl);
}
- if (type == ACL_LINK)
+ return acl;
+}
+
+struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ __u16 setting)
+{
+ struct hci_conn *acl;
+ struct hci_conn *sco;
+
+ acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
+ if (IS_ERR(acl))
return acl;
sco = hci_conn_hash_lookup_ba(hdev, type, dst);
if (!sco) {
sco = hci_conn_add(hdev, type, dst);
if (!sco) {
- hci_conn_put(acl);
- return NULL;
+ hci_conn_drop(acl);
+ return ERR_PTR(-ENOMEM);
}
}
@@ -402,14 +830,16 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
hci_conn_hold(sco);
+ sco->setting = setting;
+
if (acl->state == BT_CONNECTED &&
- (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
- acl->power_save = 1;
- hci_conn_enter_active_mode(acl);
+ (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
+ set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
+ hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
- if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) {
+ if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
/* defer SCO setup until mode change completed */
- set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->pend);
+ set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
return sco;
}
@@ -418,25 +848,33 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
return sco;
}
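With this change the connect helpers report failure as an ERR_PTR() encoded errno rather than NULL, so callers must test with IS_ERR() and extract the code with PTR_ERR(); checking for NULL would miss every error. A minimal caller sketch (the wrapper is hypothetical):

#include <linux/err.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static int my_open_acl(struct hci_dev *hdev, bdaddr_t *dst)
{
	struct hci_conn *acl;

	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
	if (IS_ERR(acl))
		return PTR_ERR(acl);	/* e.g. -ENOTSUPP or -ENOMEM */

	/* ... use the connection, then release the hold taken for us ... */
	hci_conn_drop(acl);
	return 0;
}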
-EXPORT_SYMBOL(hci_connect);
/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
- BT_DBG("conn %p", conn);
+ BT_DBG("hcon %p", conn);
+
+ /* In Secure Connections Only mode, it is required that Secure
+ * Connections is used and the link is encrypted with AES-CCM
+ * using a P-256 authenticated combination key.
+ */
+ if (test_bit(HCI_SC_ONLY, &conn->hdev->dev_flags)) {
+ if (!hci_conn_sc_enabled(conn) ||
+ !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
+ conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
+ return 0;
+ }
- if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0 &&
- !(conn->link_mode & HCI_LM_ENCRYPT))
+ if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
return 0;
return 1;
}
-EXPORT_SYMBOL(hci_conn_check_link_mode);
/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
- BT_DBG("conn %p", conn);
+ BT_DBG("hcon %p", conn);
if (conn->pending_sec_level > sec_level)
sec_level = conn->pending_sec_level;
@@ -451,71 +889,148 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
conn->auth_type = auth_type;
- if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
+ if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
struct hci_cp_auth_requested cp;
+
cp.handle = cpu_to_le16(conn->handle);
hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
- sizeof(cp), &cp);
+ sizeof(cp), &cp);
+
+ /* If we're already encrypted set the REAUTH_PEND flag,
+ * otherwise set the ENCRYPT_PEND.
+ */
+ if (conn->link_mode & HCI_LM_ENCRYPT)
+ set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
+ else
+ set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
}
return 0;
}
+/* Encrypt the link */
+static void hci_conn_encrypt(struct hci_conn *conn)
+{
+ BT_DBG("hcon %p", conn);
+
+ if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
+ struct hci_cp_set_conn_encrypt cp;
+ cp.handle = cpu_to_le16(conn->handle);
+ cp.encrypt = 0x01;
+ hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
+ &cp);
+ }
+}
+
/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
- BT_DBG("conn %p", conn);
+ BT_DBG("hcon %p", conn);
+
+ if (conn->type == LE_LINK)
+ return smp_conn_security(conn, sec_level);
+ /* For sdp we don't need the link key. */
if (sec_level == BT_SECURITY_SDP)
return 1;
- if (sec_level == BT_SECURITY_LOW &&
- (!conn->ssp_mode || !conn->hdev->ssp_mode))
+ /* For non 2.1 devices and low security level we don't need the link
+ key. */
+ if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
return 1;
- if (conn->link_mode & HCI_LM_ENCRYPT)
- return hci_conn_auth(conn, sec_level, auth_type);
+ /* For other security levels we need the link key. */
+ if (!(conn->link_mode & HCI_LM_AUTH))
+ goto auth;
+
+ /* An authenticated FIPS approved combination key has sufficient
+ * security for security level 4. */
+ if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
+ sec_level == BT_SECURITY_FIPS)
+ goto encrypt;
+
+ /* An authenticated combination key has sufficient security for
+  * security level 3. */
+ if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
+ conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
+ sec_level == BT_SECURITY_HIGH)
+ goto encrypt;
+
+ /* An unauthenticated combination key has sufficient security for
+  * security levels 1 and 2. */
+ if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
+ conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
+ (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
+ goto encrypt;
+
+ /* A combination key always has sufficient security for security
+  * levels 1 and 2. A high security level additionally requires that
+  * the combination key was generated with the maximum PIN code
+  * length (16). This applies to pre-2.1 units. */
+ if (conn->key_type == HCI_LK_COMBINATION &&
+ (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
+ conn->pin_length == 16))
+ goto encrypt;
+
+auth:
+ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
+ return 0;
- if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
+ if (!hci_conn_auth(conn, sec_level, auth_type))
return 0;
- if (hci_conn_auth(conn, sec_level, auth_type)) {
- struct hci_cp_set_conn_encrypt cp;
- cp.handle = cpu_to_le16(conn->handle);
- cp.encrypt = 1;
- hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT,
- sizeof(cp), &cp);
- }
+encrypt:
+ if (conn->link_mode & HCI_LM_ENCRYPT)
+ return 1;
+ hci_conn_encrypt(conn);
return 0;
}
EXPORT_SYMBOL(hci_conn_security);
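The ladder above decides, from the stored key type, whether a requested security level can be satisfied by encrypting with the existing link key or whether fresh authentication is needed first. From a caller's point of view the contract is simple: a return of 1 means the link already satisfies the request, 0 means the work was started asynchronously. A hypothetical profile-side sketch of that contract:

#include <linux/errno.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

/* e.g. a profile that needs an encrypted link before streaming data */
static int my_profile_secure(struct hci_conn *conn)
{
	if (hci_conn_security(conn, BT_SECURITY_MEDIUM,
			      HCI_AT_GENERAL_BONDING) == 1)
		return 0;	/* already authenticated and encrypted */

	/* 0 from hci_conn_security() means authentication and/or
	 * encryption was kicked off; completion arrives asynchronously
	 * through the security callbacks, so the caller must wait. */
	return -EINPROGRESS;
}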
+/* Check secure link requirement */
+int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
+{
+ BT_DBG("hcon %p", conn);
+
+ /* Accept if the required security level is below high/FIPS */
+ if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
+ return 1;
+
+ /* Accept if secure or higher security level is already present */
+ if (conn->sec_level == BT_SECURITY_HIGH ||
+ conn->sec_level == BT_SECURITY_FIPS)
+ return 1;
+
+ /* Reject not secure link */
+ return 0;
+}
+EXPORT_SYMBOL(hci_conn_check_secure);
+
/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
- BT_DBG("conn %p", conn);
+ BT_DBG("hcon %p", conn);
- if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
+ if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
struct hci_cp_change_conn_link_key cp;
cp.handle = cpu_to_le16(conn->handle);
hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
- sizeof(cp), &cp);
+ sizeof(cp), &cp);
}
return 0;
}
-EXPORT_SYMBOL(hci_conn_change_link_key);
/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
- BT_DBG("conn %p", conn);
+ BT_DBG("hcon %p", conn);
if (!role && conn->link_mode & HCI_LM_MASTER)
return 1;
- if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->pend)) {
+ if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
struct hci_cp_switch_role cp;
bacpy(&cp.bdaddr, &conn->dst);
cp.role = role;
@@ -527,19 +1042,22 @@ int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
EXPORT_SYMBOL(hci_conn_switch_role);
/* Enter active mode */
-void hci_conn_enter_active_mode(struct hci_conn *conn)
+void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
struct hci_dev *hdev = conn->hdev;
- BT_DBG("conn %p mode %d", conn, conn->mode);
+ BT_DBG("hcon %p mode %d", conn, conn->mode);
if (test_bit(HCI_RAW, &hdev->flags))
return;
- if (conn->mode != HCI_CM_SNIFF || !conn->power_save)
+ if (conn->mode != HCI_CM_SNIFF)
goto timer;
- if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
+ if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
+ goto timer;
+
+ if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
struct hci_cp_exit_sniff_mode cp;
cp.handle = cpu_to_le16(conn->handle);
hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
@@ -547,64 +1065,22 @@ void hci_conn_enter_active_mode(struct hci_conn *conn)
timer:
if (hdev->idle_timeout > 0)
- mod_timer(&conn->idle_timer,
- jiffies + msecs_to_jiffies(hdev->idle_timeout));
-}
-
-/* Enter sniff mode */
-void hci_conn_enter_sniff_mode(struct hci_conn *conn)
-{
- struct hci_dev *hdev = conn->hdev;
-
- BT_DBG("conn %p mode %d", conn, conn->mode);
-
- if (test_bit(HCI_RAW, &hdev->flags))
- return;
-
- if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
- return;
-
- if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
- return;
-
- if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
- struct hci_cp_sniff_subrate cp;
- cp.handle = cpu_to_le16(conn->handle);
- cp.max_latency = cpu_to_le16(0);
- cp.min_remote_timeout = cpu_to_le16(0);
- cp.min_local_timeout = cpu_to_le16(0);
- hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
- }
-
- if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
- struct hci_cp_sniff_mode cp;
- cp.handle = cpu_to_le16(conn->handle);
- cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
- cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
- cp.attempt = cpu_to_le16(4);
- cp.timeout = cpu_to_le16(1);
- hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
- }
+ queue_delayed_work(hdev->workqueue, &conn->idle_work,
+ msecs_to_jiffies(hdev->idle_timeout));
}
/* Drop all connection on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- struct list_head *p;
+ struct hci_conn *c, *n;
BT_DBG("hdev %s", hdev->name);
- p = h->list.next;
- while (p != &h->list) {
- struct hci_conn *c;
-
- c = list_entry(p, struct hci_conn, list);
- p = p->next;
-
+ list_for_each_entry_safe(c, n, &h->list, list) {
c->state = BT_CLOSED;
- hci_proto_disconn_cfm(c, 0x16);
+ hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
hci_conn_del(c);
}
}
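list_for_each_entry_safe() is what makes the flush legal: hci_conn_del() unlinks the entry being visited, and the _safe variant caches the successor in a second cursor before the body runs. The general shape, with a hypothetical item type:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head list;
};

static void flush_items(struct list_head *head)
{
	struct item *it, *tmp;

	/* tmp already points at the next node, so freeing 'it'
	 * cannot break the traversal */
	list_for_each_entry_safe(it, tmp, head, list) {
		list_del(&it->list);
		kfree(it);
	}
}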
@@ -620,30 +1096,17 @@ void hci_conn_check_pending(struct hci_dev *hdev)
conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
if (conn)
- hci_acl_connect(conn);
+ hci_acl_create_connection(conn);
hci_dev_unlock(hdev);
}
-void hci_conn_hold_device(struct hci_conn *conn)
-{
- atomic_inc(&conn->devref);
-}
-EXPORT_SYMBOL(hci_conn_hold_device);
-
-void hci_conn_put_device(struct hci_conn *conn)
-{
- if (atomic_dec_and_test(&conn->devref))
- hci_conn_del_sysfs(conn);
-}
-EXPORT_SYMBOL(hci_conn_put_device);
-
int hci_get_conn_list(void __user *arg)
{
+ struct hci_conn *c;
struct hci_conn_list_req req, *cl;
struct hci_conn_info *ci;
struct hci_dev *hdev;
- struct list_head *p;
int n = 0, size, err;
if (copy_from_user(&req, arg, sizeof(req)))
@@ -666,11 +1129,8 @@ int hci_get_conn_list(void __user *arg)
ci = cl->conn_info;
- hci_dev_lock_bh(hdev);
- list_for_each(p, &hdev->conn_hash.list) {
- register struct hci_conn *c;
- c = list_entry(p, struct hci_conn, list);
-
+ hci_dev_lock(hdev);
+ list_for_each_entry(c, &hdev->conn_hash.list, list) {
bacpy(&(ci + n)->bdaddr, &c->dst);
(ci + n)->handle = c->handle;
(ci + n)->type = c->type;
@@ -680,7 +1140,7 @@ int hci_get_conn_list(void __user *arg)
if (++n >= req.conn_num)
break;
}
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
cl->dev_id = hdev->id;
cl->conn_num = n;
@@ -704,7 +1164,7 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
if (copy_from_user(&req, arg, sizeof(req)))
return -EFAULT;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
if (conn) {
bacpy(&ci.bdaddr, &conn->dst);
@@ -714,7 +1174,7 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
ci.state = conn->state;
ci.link_mode = conn->link_mode;
}
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
if (!conn)
return -ENOENT;
@@ -730,14 +1190,93 @@ int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
if (copy_from_user(&req, arg, sizeof(req)))
return -EFAULT;
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
if (conn)
req.type = conn->auth_type;
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
if (!conn)
return -ENOENT;
return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}
+
+struct hci_chan *hci_chan_create(struct hci_conn *conn)
+{
+ struct hci_dev *hdev = conn->hdev;
+ struct hci_chan *chan;
+
+ BT_DBG("%s hcon %p", hdev->name, conn);
+
+ chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
+ if (!chan)
+ return NULL;
+
+ chan->conn = conn;
+ skb_queue_head_init(&chan->data_q);
+ chan->state = BT_CONNECTED;
+
+ list_add_rcu(&chan->list, &conn->chan_list);
+
+ return chan;
+}
+
+void hci_chan_del(struct hci_chan *chan)
+{
+ struct hci_conn *conn = chan->conn;
+ struct hci_dev *hdev = conn->hdev;
+
+ BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
+
+ list_del_rcu(&chan->list);
+
+ synchronize_rcu();
+
+ hci_conn_drop(conn);
+
+ skb_queue_purge(&chan->data_q);
+ kfree(chan);
+}
+
+void hci_chan_list_flush(struct hci_conn *conn)
+{
+ struct hci_chan *chan, *n;
+
+ BT_DBG("hcon %p", conn);
+
+ list_for_each_entry_safe(chan, n, &conn->chan_list, list)
+ hci_chan_del(chan);
+}
+
+static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
+ __u16 handle)
+{
+ struct hci_chan *hchan;
+
+ list_for_each_entry(hchan, &hcon->chan_list, list) {
+ if (hchan->handle == handle)
+ return hchan;
+ }
+
+ return NULL;
+}
+
+struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *hcon;
+ struct hci_chan *hchan = NULL;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(hcon, &h->list, list) {
+ hchan = __hci_chan_lookup_handle(hcon, handle);
+ if (hchan)
+ break;
+ }
+
+ rcu_read_unlock();
+
+ return hchan;
+}
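Because channels are added with list_add_rcu() and removed with list_del_rcu() followed by synchronize_rcu(), the handle lookup can walk the lists locklessly inside rcu_read_lock(). The reader-side idiom in isolation, with a hypothetical node type; note that, as in hci_chan_lookup_handle() above, the returned pointer is only safe to use if the caller holds its own reference, since the RCU read section ends before return:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

struct node {
	struct list_head list;
	u16 handle;
};

static struct node *node_lookup(struct list_head *head, u16 handle)
{
	struct node *n, *found = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(n, head, list) {
		if (n->handle == handle) {
			found = n;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}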
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 9c4541bc488..0a43cce9a91 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1,6 +1,7 @@
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
+ Copyright (C) 2011 ProFUSION Embedded Systems
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -24,38 +25,22 @@
/* Bluetooth HCI core. */
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/kmod.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-#include <linux/notifier.h>
+#include <linux/export.h>
+#include <linux/idr.h>
#include <linux/rfkill.h>
-#include <net/sock.h>
-
-#include <asm/system.h>
-#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/crypto.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/l2cap.h>
-static void hci_cmd_task(unsigned long arg);
-static void hci_rx_task(unsigned long arg);
-static void hci_tx_task(unsigned long arg);
-static void hci_notify(struct hci_dev *hdev, int event);
+#include "smp.h"
-static DEFINE_RWLOCK(hci_task_lock);
+static void hci_rx_work(struct work_struct *work);
+static void hci_cmd_work(struct work_struct *work);
+static void hci_tx_work(struct work_struct *work);
/* HCI device list */
LIST_HEAD(hci_dev_list);
@@ -65,42 +50,1036 @@ DEFINE_RWLOCK(hci_dev_list_lock);
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);
-/* HCI protocols */
-#define HCI_MAX_PROTO 2
-struct hci_proto *hci_proto[HCI_MAX_PROTO];
-
-/* HCI notifiers list */
-static ATOMIC_NOTIFIER_HEAD(hci_notifier);
+/* HCI ID Numbering */
+static DEFINE_IDA(hci_index_ida);
/* ---- HCI notifications ---- */
-int hci_register_notifier(struct notifier_block *nb)
+static void hci_notify(struct hci_dev *hdev, int event)
{
- return atomic_notifier_chain_register(&hci_notifier, nb);
+ hci_sock_dev_event(hdev, event);
}
-int hci_unregister_notifier(struct notifier_block *nb)
+/* ---- HCI debugfs entries ---- */
+
+static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
{
- return atomic_notifier_chain_unregister(&hci_notifier, nb);
+ struct hci_dev *hdev = file->private_data;
+ char buf[3];
+
+ buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
-static void hci_notify(struct hci_dev *hdev, int event)
+static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
{
- atomic_notifier_call_chain(&hci_notifier, event, hdev);
+ struct hci_dev *hdev = file->private_data;
+ struct sk_buff *skb;
+ char buf[32];
+ size_t buf_size = min(count, (sizeof(buf)-1));
+ bool enable;
+ int err;
+
+ if (!test_bit(HCI_UP, &hdev->flags))
+ return -ENETDOWN;
+
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+ if (strtobool(buf, &enable))
+ return -EINVAL;
+
+ if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
+ return -EALREADY;
+
+ hci_req_lock(hdev);
+ if (enable)
+ skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
+ HCI_CMD_TIMEOUT);
+ else
+ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
+ HCI_CMD_TIMEOUT);
+ hci_req_unlock(hdev);
+
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ err = -bt_to_errno(skb->data[0]);
+ kfree_skb(skb);
+
+ if (err < 0)
+ return err;
+
+ change_bit(HCI_DUT_MODE, &hdev->dev_flags);
+
+ return count;
}
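dut_mode_write() is the stock recipe for a writable boolean debugfs attribute: bound the copy, NUL-terminate, parse with strtobool(), reject no-op writes with -EALREADY, and flip the flag bit. Stripped of the HCI command traffic, the skeleton looks like this (my_dev and MY_FLAG are hypothetical):

#include <linux/fs.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>

struct my_dev {
	unsigned long flags;
};

#define MY_FLAG 0

static ssize_t my_flag_write(struct file *file, const char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct my_dev *dev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, sizeof(buf) - 1);
	bool enable;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(MY_FLAG, &dev->flags))
		return -EALREADY;

	change_bit(MY_FLAG, &dev->flags);
	return count;	/* consume the whole write */
}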
-/* ---- HCI requests ---- */
+static const struct file_operations dut_mode_fops = {
+ .open = simple_open,
+ .read = dut_mode_read,
+ .write = dut_mode_write,
+ .llseek = default_llseek,
+};
-void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
+static int features_show(struct seq_file *f, void *ptr)
{
- BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
+ struct hci_dev *hdev = f->private;
+ u8 p;
- /* If the request has set req_last_cmd (typical for multi-HCI
- * command requests) check if the completed command matches
- * this, and if not just return. Single HCI command requests
- * typically leave req_last_cmd as 0 */
- if (hdev->req_last_cmd && cmd != hdev->req_last_cmd)
- return;
+ hci_dev_lock(hdev);
+ for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
+ seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
+ "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
+ hdev->features[p][0], hdev->features[p][1],
+ hdev->features[p][2], hdev->features[p][3],
+ hdev->features[p][4], hdev->features[p][5],
+ hdev->features[p][6], hdev->features[p][7]);
+ }
+ if (lmp_le_capable(hdev))
+ seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
+ "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
+ hdev->le_features[0], hdev->le_features[1],
+ hdev->le_features[2], hdev->le_features[3],
+ hdev->le_features[4], hdev->le_features[5],
+ hdev->le_features[6], hdev->le_features[7]);
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int features_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, features_show, inode->i_private);
+}
+
+static const struct file_operations features_fops = {
+ .open = features_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
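All of the read-only views that follow share the single_open() seq_file idiom: a show() callback that prints under the device lock, an open() that binds it to the pointer stashed in inode->i_private by debugfs_create_file(), and four lines of file_operations boilerplate. The skeleton, for a hypothetical "state" attribute:

#include <linux/fs.h>
#include <linux/seq_file.h>

struct my_dev {
	int state;
};

static int state_show(struct seq_file *f, void *p)
{
	struct my_dev *dev = f->private;

	seq_printf(f, "%d\n", dev->state);
	return 0;
}

static int state_open(struct inode *inode, struct file *file)
{
	/* i_private is the data pointer given to debugfs_create_file() */
	return single_open(file, state_show, inode->i_private);
}

static const struct file_operations state_fops = {
	.open		= state_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};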
+
+static int blacklist_show(struct seq_file *f, void *p)
+{
+ struct hci_dev *hdev = f->private;
+ struct bdaddr_list *b;
+
+ hci_dev_lock(hdev);
+ list_for_each_entry(b, &hdev->blacklist, list)
+ seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int blacklist_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, blacklist_show, inode->i_private);
+}
+
+static const struct file_operations blacklist_fops = {
+ .open = blacklist_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int uuids_show(struct seq_file *f, void *p)
+{
+ struct hci_dev *hdev = f->private;
+ struct bt_uuid *uuid;
+
+ hci_dev_lock(hdev);
+ list_for_each_entry(uuid, &hdev->uuids, list) {
+ u8 i, val[16];
+
+ /* The Bluetooth UUID values are stored in big endian,
+ * but with reversed byte order. So convert them into
+ * the right order for the %pUb modifier.
+ */
+ for (i = 0; i < 16; i++)
+ val[i] = uuid->uuid[15 - i];
+
+ seq_printf(f, "%pUb\n", val);
+ }
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int uuids_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, uuids_show, inode->i_private);
+}
+
+static const struct file_operations uuids_fops = {
+ .open = uuids_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int inquiry_cache_show(struct seq_file *f, void *p)
+{
+ struct hci_dev *hdev = f->private;
+ struct discovery_state *cache = &hdev->discovery;
+ struct inquiry_entry *e;
+
+ hci_dev_lock(hdev);
+
+ list_for_each_entry(e, &cache->all, all) {
+ struct inquiry_data *data = &e->data;
+ seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
+ &data->bdaddr,
+ data->pscan_rep_mode, data->pscan_period_mode,
+ data->pscan_mode, data->dev_class[2],
+ data->dev_class[1], data->dev_class[0],
+ __le16_to_cpu(data->clock_offset),
+ data->rssi, data->ssp_mode, e->timestamp);
+ }
+
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int inquiry_cache_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, inquiry_cache_show, inode->i_private);
+}
+
+static const struct file_operations inquiry_cache_fops = {
+ .open = inquiry_cache_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int link_keys_show(struct seq_file *f, void *ptr)
+{
+ struct hci_dev *hdev = f->private;
+ struct list_head *p, *n;
+
+ hci_dev_lock(hdev);
+ list_for_each_safe(p, n, &hdev->link_keys) {
+ struct link_key *key = list_entry(p, struct link_key, list);
+ seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
+ HCI_LINK_KEY_SIZE, key->val, key->pin_len);
+ }
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int link_keys_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, link_keys_show, inode->i_private);
+}
+
+static const struct file_operations link_keys_fops = {
+ .open = link_keys_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int dev_class_show(struct seq_file *f, void *ptr)
+{
+ struct hci_dev *hdev = f->private;
+
+ hci_dev_lock(hdev);
+ seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
+ hdev->dev_class[1], hdev->dev_class[0]);
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int dev_class_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dev_class_show, inode->i_private);
+}
+
+static const struct file_operations dev_class_fops = {
+ .open = dev_class_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int voice_setting_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->voice_setting;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
+ NULL, "0x%4.4llx\n");
+
+static int auto_accept_delay_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ hdev->auto_accept_delay = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int auto_accept_delay_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->auto_accept_delay;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
+ auto_accept_delay_set, "%llu\n");
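Each writable u64 tunable below is the same three pieces: a locked getter, a setter that validates before storing, and DEFINE_SIMPLE_ATTRIBUTE() to generate the file_operations with the chosen printf format. The minimal shape, for a hypothetical tunable with an upper bound:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/mutex.h>

struct my_dev {
	struct mutex lock;
	u64 tunable;
};

static int my_tunable_set(void *data, u64 val)
{
	struct my_dev *dev = data;

	if (val > 1000)			/* range check before accepting */
		return -EINVAL;

	mutex_lock(&dev->lock);
	dev->tunable = val;
	mutex_unlock(&dev->lock);
	return 0;
}

static int my_tunable_get(void *data, u64 *val)
{
	struct my_dev *dev = data;

	mutex_lock(&dev->lock);
	*val = dev->tunable;
	mutex_unlock(&dev->lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(my_tunable_fops, my_tunable_get,
			my_tunable_set, "%llu\n");

/* exposed with:
 *	debugfs_create_file("my_tunable", 0644, parent, dev,
 *			    &my_tunable_fops);
 */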
+
+static int ssp_debug_mode_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+ struct sk_buff *skb;
+ __u8 mode;
+ int err;
+
+ if (val != 0 && val != 1)
+ return -EINVAL;
+
+ if (!test_bit(HCI_UP, &hdev->flags))
+ return -ENETDOWN;
+
+ hci_req_lock(hdev);
+ mode = val;
+ skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
+ &mode, HCI_CMD_TIMEOUT);
+ hci_req_unlock(hdev);
+
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ err = -bt_to_errno(skb->data[0]);
+ kfree_skb(skb);
+
+ if (err < 0)
+ return err;
+
+ hci_dev_lock(hdev);
+ hdev->ssp_debug_mode = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int ssp_debug_mode_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->ssp_debug_mode;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
+ ssp_debug_mode_set, "%llu\n");
+
+static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hci_dev *hdev = file->private_data;
+ char buf[3];
+
+ buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y' : 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t force_sc_support_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hci_dev *hdev = file->private_data;
+ char buf[32];
+ size_t buf_size = min(count, (sizeof(buf)-1));
+ bool enable;
+
+ if (test_bit(HCI_UP, &hdev->flags))
+ return -EBUSY;
+
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+ if (strtobool(buf, &enable))
+ return -EINVAL;
+
+ if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
+ return -EALREADY;
+
+ change_bit(HCI_FORCE_SC, &hdev->dev_flags);
+
+ return count;
+}
+
+static const struct file_operations force_sc_support_fops = {
+ .open = simple_open,
+ .read = force_sc_support_read,
+ .write = force_sc_support_write,
+ .llseek = default_llseek,
+};
+
+static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hci_dev *hdev = file->private_data;
+ char buf[3];
+
+ buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static const struct file_operations sc_only_mode_fops = {
+ .open = simple_open,
+ .read = sc_only_mode_read,
+ .llseek = default_llseek,
+};
+
+static int idle_timeout_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val != 0 && (val < 500 || val > 3600000))
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->idle_timeout = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int idle_timeout_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->idle_timeout;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
+ idle_timeout_set, "%llu\n");
+
+static int rpa_timeout_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ /* Require the RPA timeout to be at least 30 seconds and at most
+ * 24 hours.
+ */
+ if (val < 30 || val > (60 * 60 * 24))
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->rpa_timeout = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int rpa_timeout_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->rpa_timeout;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
+ rpa_timeout_set, "%llu\n");
+
+static int sniff_min_interval_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->sniff_min_interval = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int sniff_min_interval_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->sniff_min_interval;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
+ sniff_min_interval_set, "%llu\n");
+
+static int sniff_max_interval_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->sniff_max_interval = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int sniff_max_interval_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->sniff_max_interval;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
+ sniff_max_interval_set, "%llu\n");
+
+static int conn_info_min_age_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val == 0 || val > hdev->conn_info_max_age)
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->conn_info_min_age = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int conn_info_min_age_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->conn_info_min_age;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
+ conn_info_min_age_set, "%llu\n");
+
+static int conn_info_max_age_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val == 0 || val < hdev->conn_info_min_age)
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->conn_info_max_age = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int conn_info_max_age_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->conn_info_max_age;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
+ conn_info_max_age_set, "%llu\n");
+
+static int identity_show(struct seq_file *f, void *p)
+{
+ struct hci_dev *hdev = f->private;
+ bdaddr_t addr;
+ u8 addr_type;
+
+ hci_dev_lock(hdev);
+
+ hci_copy_identity_address(hdev, &addr, &addr_type);
+
+ seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
+ 16, hdev->irk, &hdev->rpa);
+
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int identity_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, identity_show, inode->i_private);
+}
+
+static const struct file_operations identity_fops = {
+ .open = identity_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int random_address_show(struct seq_file *f, void *p)
+{
+ struct hci_dev *hdev = f->private;
+
+ hci_dev_lock(hdev);
+ seq_printf(f, "%pMR\n", &hdev->random_addr);
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int random_address_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, random_address_show, inode->i_private);
+}
+
+static const struct file_operations random_address_fops = {
+ .open = random_address_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int static_address_show(struct seq_file *f, void *p)
+{
+ struct hci_dev *hdev = f->private;
+
+ hci_dev_lock(hdev);
+ seq_printf(f, "%pMR\n", &hdev->static_addr);
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int static_address_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, static_address_show, inode->i_private);
+}
+
+static const struct file_operations static_address_fops = {
+ .open = static_address_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static ssize_t force_static_address_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hci_dev *hdev = file->private_data;
+ char buf[3];
+
+ buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y' : 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t force_static_address_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hci_dev *hdev = file->private_data;
+ char buf[32];
+ size_t buf_size = min(count, (sizeof(buf)-1));
+ bool enable;
+
+ if (test_bit(HCI_UP, &hdev->flags))
+ return -EBUSY;
+
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+ if (strtobool(buf, &enable))
+ return -EINVAL;
+
+ if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
+ return -EALREADY;
+
+ change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);
+
+ return count;
+}
+
+static const struct file_operations force_static_address_fops = {
+ .open = simple_open,
+ .read = force_static_address_read,
+ .write = force_static_address_write,
+ .llseek = default_llseek,
+};
+
+static int white_list_show(struct seq_file *f, void *ptr)
+{
+ struct hci_dev *hdev = f->private;
+ struct bdaddr_list *b;
+
+ hci_dev_lock(hdev);
+ list_for_each_entry(b, &hdev->le_white_list, list)
+ seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int white_list_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, white_list_show, inode->i_private);
+}
+
+static const struct file_operations white_list_fops = {
+ .open = white_list_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
+{
+ struct hci_dev *hdev = f->private;
+ struct list_head *p, *n;
+
+ hci_dev_lock(hdev);
+ list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
+ struct smp_irk *irk = list_entry(p, struct smp_irk, list);
+ seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
+ &irk->bdaddr, irk->addr_type,
+ 16, irk->val, &irk->rpa);
+ }
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int identity_resolving_keys_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, identity_resolving_keys_show,
+ inode->i_private);
+}
+
+static const struct file_operations identity_resolving_keys_fops = {
+ .open = identity_resolving_keys_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int long_term_keys_show(struct seq_file *f, void *ptr)
+{
+ struct hci_dev *hdev = f->private;
+ struct list_head *p, *n;
+
+ hci_dev_lock(hdev);
+ list_for_each_safe(p, n, &hdev->long_term_keys) {
+ struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
+ seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
+ &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
+ ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
+ __le64_to_cpu(ltk->rand), 16, ltk->val);
+ }
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int long_term_keys_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, long_term_keys_show, inode->i_private);
+}
+
+static const struct file_operations long_term_keys_fops = {
+ .open = long_term_keys_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int conn_min_interval_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->le_conn_min_interval = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int conn_min_interval_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->le_conn_min_interval;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
+ conn_min_interval_set, "%llu\n");
+
+static int conn_max_interval_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->le_conn_max_interval = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int conn_max_interval_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->le_conn_max_interval;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
+ conn_max_interval_set, "%llu\n");
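
For orientation (a core-spec fact, not spelled out in the patch): connection interval values are in units of 1.25 ms, so the accepted window 0x0006..0x0c80 spans 7.5 ms to 4 s, and the extra comparisons keep min <= max. A small conversion sketch:

/* 0x0006 * 1.25 ms =    7.5 ms (minimum allowed)
 * 0x0c80 * 1.25 ms = 4000.0 ms (maximum allowed)
 */
static inline unsigned int le_conn_interval_to_us(u16 interval)
{
        return interval * 1250; /* one unit is 1.25 ms = 1250 us */
}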
+
+static int adv_channel_map_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val < 0x01 || val > 0x07)
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->le_adv_channel_map = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int adv_channel_map_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->le_adv_channel_map;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
+ adv_channel_map_set, "%llu\n");
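
The bounds check accepts 0x01..0x07 because the value is a three-bit channel mask (a core-spec detail, not restated in the patch):

/* LE advertising channel map:
 *   bit 0 (0x01) - channel 37
 *   bit 1 (0x02) - channel 38
 *   bit 2 (0x04) - channel 39
 * At least one bit must be set; 0x07 advertises on all three channels.
 */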
+
+static ssize_t lowpan_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hci_dev *hdev = file->private_data;
+ char buf[3];
+
+ buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
+ size_t count, loff_t *position)
+{
+ struct hci_dev *hdev = fp->private_data;
+ bool enable;
+ char buf[32];
+ size_t buf_size = min(count, (sizeof(buf)-1));
+
+ if (copy_from_user(buf, user_buffer, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+
+ if (strtobool(buf, &enable) < 0)
+ return -EINVAL;
+
+ if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
+ return -EALREADY;
+
+ change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);
+
+ return count;
+}
+
+static const struct file_operations lowpan_debugfs_fops = {
+ .open = simple_open,
+ .read = lowpan_read,
+ .write = lowpan_write,
+ .llseek = default_llseek,
+};
+
+static int le_auto_conn_show(struct seq_file *sf, void *ptr)
+{
+ struct hci_dev *hdev = sf->private;
+ struct hci_conn_params *p;
+
+ hci_dev_lock(hdev);
+
+ list_for_each_entry(p, &hdev->le_conn_params, list) {
+ seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
+ p->auto_connect);
+ }
+
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int le_auto_conn_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, le_auto_conn_show, inode->i_private);
+}
+
+static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
+ size_t count, loff_t *offset)
+{
+ struct seq_file *sf = file->private_data;
+ struct hci_dev *hdev = sf->private;
+ u8 auto_connect = 0;
+ bdaddr_t addr;
+ u8 addr_type;
+ char *buf;
+ int err = 0;
+ int n;
+
+ /* Don't allow partial write */
+ if (*offset != 0)
+ return -EINVAL;
+
+ if (count < 3)
+ return -EINVAL;
+
+ buf = memdup_user(data, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ if (memcmp(buf, "add", 3) == 0) {
+ n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
+ &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
+ &addr.b[1], &addr.b[0], &addr_type,
+ &auto_connect);
+
+ if (n < 7) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ hci_dev_lock(hdev);
+ err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
+ hdev->le_conn_min_interval,
+ hdev->le_conn_max_interval);
+ hci_dev_unlock(hdev);
+
+ if (err)
+ goto done;
+ } else if (memcmp(buf, "del", 3) == 0) {
+ n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
+ &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
+ &addr.b[1], &addr.b[0], &addr_type);
+
+ if (n < 7) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ hci_dev_lock(hdev);
+ hci_conn_params_del(hdev, &addr, addr_type);
+ hci_dev_unlock(hdev);
+ } else if (memcmp(buf, "clr", 3) == 0) {
+ hci_dev_lock(hdev);
+ hci_conn_params_clear(hdev);
+ hci_pend_le_conns_clear(hdev);
+ hci_update_background_scan(hdev);
+ hci_dev_unlock(hdev);
+ } else {
+ err = -EINVAL;
+ }
+
+done:
+ kfree(buf);
+
+ if (err)
+ return err;
+ else
+ return count;
+}
+
+static const struct file_operations le_auto_conn_fops = {
+ .open = le_auto_conn_open,
+ .read = seq_read,
+ .write = le_auto_conn_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
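
le_auto_conn_write() understands three verbs. A usage sketch (device name and addresses are illustrative; the numeric fields are the core's LE address-type and auto-connect enum values):

/* # add a connection parameter entry with its auto_connect policy
 * echo "add 00:11:22:33:44:55 0 2" > /sys/kernel/debug/bluetooth/hci0/le_auto_conn
 * # remove it again
 * echo "del 00:11:22:33:44:55 0" > /sys/kernel/debug/bluetooth/hci0/le_auto_conn
 * # drop all entries and pending LE connections
 * echo "clr" > /sys/kernel/debug/bluetooth/hci0/le_auto_conn
 */

One caveat worth noting: memdup_user() copies exactly count bytes without NUL-terminating them, while sscanf() expects a terminated string, so unlike lowpan_write() above this handler parses a buffer with no guaranteed terminator.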
+
+/* ---- HCI requests ---- */
+
+static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
+{
+ BT_DBG("%s result 0x%2.2x", hdev->name, result);
if (hdev->req_status == HCI_REQ_PEND) {
hdev->req_result = result;
@@ -120,21 +1099,158 @@ static void hci_req_cancel(struct hci_dev *hdev, int err)
}
}
+static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
+ u8 event)
+{
+ struct hci_ev_cmd_complete *ev;
+ struct hci_event_hdr *hdr;
+ struct sk_buff *skb;
+
+ hci_dev_lock(hdev);
+
+ skb = hdev->recv_evt;
+ hdev->recv_evt = NULL;
+
+ hci_dev_unlock(hdev);
+
+ if (!skb)
+ return ERR_PTR(-ENODATA);
+
+ if (skb->len < sizeof(*hdr)) {
+ BT_ERR("Too short HCI event");
+ goto failed;
+ }
+
+ hdr = (void *) skb->data;
+ skb_pull(skb, HCI_EVENT_HDR_SIZE);
+
+ if (event) {
+ if (hdr->evt != event)
+ goto failed;
+ return skb;
+ }
+
+ if (hdr->evt != HCI_EV_CMD_COMPLETE) {
+ BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
+ goto failed;
+ }
+
+ if (skb->len < sizeof(*ev)) {
+ BT_ERR("Too short cmd_complete event");
+ goto failed;
+ }
+
+ ev = (void *) skb->data;
+ skb_pull(skb, sizeof(*ev));
+
+ if (opcode == __le16_to_cpu(ev->opcode))
+ return skb;
+
+ BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
+ __le16_to_cpu(ev->opcode));
+
+failed:
+ kfree_skb(skb);
+ return ERR_PTR(-ENODATA);
+}
+
+struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u8 event, u32 timeout)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ struct hci_request req;
+ int err = 0;
+
+ BT_DBG("%s", hdev->name);
+
+ hci_req_init(&req, hdev);
+
+ hci_req_add_ev(&req, opcode, plen, param, event);
+
+ hdev->req_status = HCI_REQ_PEND;
+
+ err = hci_req_run(&req, hci_req_sync_complete);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ add_wait_queue(&hdev->req_wait_q, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ schedule_timeout(timeout);
+
+ remove_wait_queue(&hdev->req_wait_q, &wait);
+
+ if (signal_pending(current))
+ return ERR_PTR(-EINTR);
+
+ switch (hdev->req_status) {
+ case HCI_REQ_DONE:
+ err = -bt_to_errno(hdev->req_result);
+ break;
+
+ case HCI_REQ_CANCELED:
+ err = -hdev->req_result;
+ break;
+
+ default:
+ err = -ETIMEDOUT;
+ break;
+ }
+
+ hdev->req_status = hdev->req_result = 0;
+
+ BT_DBG("%s end: err %d", hdev->name, err);
+
+ if (err < 0)
+ return ERR_PTR(err);
+
+ return hci_get_cmd_complete(hdev, opcode, event);
+}
+EXPORT_SYMBOL(__hci_cmd_sync_ev);
+
+struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout)
+{
+ return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
+}
+EXPORT_SYMBOL(__hci_cmd_sync);
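
__hci_cmd_sync() gives drivers a synchronous send-and-wait primitive on top of the request machinery. A minimal sketch (not from this patch) of how a caller might fetch the controller address with it:

static int example_read_bd_addr(struct hci_dev *hdev)
{
        struct hci_rp_read_bd_addr *rp;
        struct sk_buff *skb;

        /* blocks until the matching Command Complete event arrives */
        skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
                             HCI_INIT_TIMEOUT);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        /* skb->data now points at the return parameters (status first) */
        rp = (struct hci_rp_read_bd_addr *) skb->data;
        if (skb->len < sizeof(*rp) || rp->status) {
                kfree_skb(skb);
                return -EIO;
        }

        BT_DBG("%s public address %pMR", hdev->name, &rp->bdaddr);

        kfree_skb(skb);
        return 0;
}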
+
/* Execute request and wait for completion. */
-static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
- unsigned long opt, __u32 timeout)
+static int __hci_req_sync(struct hci_dev *hdev,
+ void (*func)(struct hci_request *req,
+ unsigned long opt),
+ unsigned long opt, __u32 timeout)
{
+ struct hci_request req;
DECLARE_WAITQUEUE(wait, current);
int err = 0;
BT_DBG("%s start", hdev->name);
+ hci_req_init(&req, hdev);
+
hdev->req_status = HCI_REQ_PEND;
+ func(&req, opt);
+
+ err = hci_req_run(&req, hci_req_sync_complete);
+ if (err < 0) {
+ hdev->req_status = 0;
+
+ /* ENODATA means the HCI request command queue is empty.
+ * This can happen when a request with conditionals doesn't
+ * trigger any commands to be sent. This is normal behavior
+ * and should not trigger an error return.
+ */
+ if (err == -ENODATA)
+ return 0;
+
+ return err;
+ }
+
add_wait_queue(&hdev->req_wait_q, &wait);
set_current_state(TASK_INTERRUPTIBLE);
- req(hdev, opt);
schedule_timeout(timeout);
remove_wait_queue(&hdev->req_wait_q, &wait);
@@ -144,7 +1260,7 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
switch (hdev->req_status) {
case HCI_REQ_DONE:
- err = -bt_err(hdev->req_result);
+ err = -bt_to_errno(hdev->req_result);
break;
case HCI_REQ_CANCELED:
@@ -156,15 +1272,17 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
break;
}
- hdev->req_last_cmd = hdev->req_status = hdev->req_result = 0;
+ hdev->req_status = hdev->req_result = 0;
BT_DBG("%s end: err %d", hdev->name, err);
return err;
}
-static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
- unsigned long opt, __u32 timeout)
+static int hci_req_sync(struct hci_dev *hdev,
+ void (*req)(struct hci_request *req,
+ unsigned long opt),
+ unsigned long opt, __u32 timeout)
{
int ret;
@@ -173,142 +1291,653 @@ static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *
/* Serialize all requests */
hci_req_lock(hdev);
- ret = __hci_request(hdev, req, opt, timeout);
+ ret = __hci_req_sync(hdev, req, opt, timeout);
hci_req_unlock(hdev);
return ret;
}
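
Both synchronous wrappers take a request-builder callback plus an opaque opt value; the ioctl handlers later in this patch use them exactly this way, e.g. (scan value illustrative):

        /* enable page + inquiry scan, as HCISETSCAN would */
        err = hci_req_sync(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
                           HCI_INIT_TIMEOUT);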
-static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
+static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
- BT_DBG("%s %ld", hdev->name, opt);
+ BT_DBG("%s %ld", req->hdev->name, opt);
/* Reset device */
- hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
+ set_bit(HCI_RESET, &req->hdev->flags);
+ hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
-static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
+static void bredr_init(struct hci_request *req)
{
- struct sk_buff *skb;
- __le16 param;
- __u8 flt_type;
+ req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
- BT_DBG("%s %ld", hdev->name, opt);
+ /* Read Local Supported Features */
+ hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
- /* Driver initialization */
+ /* Read Local Version */
+ hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
- /* Special commands */
- while ((skb = skb_dequeue(&hdev->driver_init))) {
- bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
- skb->dev = (void *) hdev;
+ /* Read BD Address */
+ hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
+}
- skb_queue_tail(&hdev->cmd_q, skb);
- tasklet_schedule(&hdev->cmd_task);
- }
- skb_queue_purge(&hdev->driver_init);
+static void amp_init(struct hci_request *req)
+{
+ req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
- /* Mandatory initialization */
+ /* Read Local Version */
+ hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
- /* Reset */
- if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
- hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
+ /* Read Local Supported Commands */
+ hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
/* Read Local Supported Features */
- hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
+ hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
- /* Read Local Version */
- hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
+ /* Read Local AMP Info */
+ hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
- /* Read Buffer Size (ACL mtu, max pkt, etc.) */
- hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
+ /* Read Data Blk size */
+ hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
+
+ /* Read Flow Control Mode */
+ hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
+
+ /* Read Location Data */
+ hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
+}
+
+static void hci_init1_req(struct hci_request *req, unsigned long opt)
+{
+ struct hci_dev *hdev = req->hdev;
+
+ BT_DBG("%s %ld", hdev->name, opt);
+
+ /* Reset */
+ if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
+ hci_reset_req(req, 0);
+
+ switch (hdev->dev_type) {
+ case HCI_BREDR:
+ bredr_init(req);
+ break;
-#if 0
- /* Host buffer size */
- {
- struct hci_cp_host_buffer_size cp;
- cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
- cp.sco_mtu = HCI_MAX_SCO_SIZE;
- cp.acl_max_pkt = cpu_to_le16(0xffff);
- cp.sco_max_pkt = cpu_to_le16(0xffff);
- hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
+ case HCI_AMP:
+ amp_init(req);
+ break;
+
+ default:
+ BT_ERR("Unknown device type %d", hdev->dev_type);
+ break;
}
-#endif
+}
- /* Read BD Address */
- hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
+static void bredr_setup(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+
+ __le16 param;
+ __u8 flt_type;
+
+ /* Read Buffer Size (ACL mtu, max pkt, etc.) */
+ hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
/* Read Class of Device */
- hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
+ hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
/* Read Local Name */
- hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
+ hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
/* Read Voice Setting */
- hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
+ hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
+
+ /* Read Number of Supported IAC */
+ hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
- /* Optional initialization */
+ /* Read Current IAC LAP */
+ hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
/* Clear Event Filters */
flt_type = HCI_FLT_CLEAR_ALL;
- hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
-
- /* Page timeout ~20 secs */
- param = cpu_to_le16(0x8000);
- hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);
+ hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
/* Connection accept timeout ~20 secs */
param = cpu_to_le16(0x7d00);
- hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
+ hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
+
+ /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
+ * but it does not support page scan related HCI commands.
+ */
+ if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
+ hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
+ hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
+ }
+}
+
+static void le_setup(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+
+ /* Read LE Buffer Size */
+ hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
+
+ /* Read LE Local Supported Features */
+ hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
+
+ /* Read LE Supported States */
+ hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
+
+ /* Read LE Advertising Channel TX Power */
+ hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
+
+ /* Read LE White List Size */
+ hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
+
+ /* Clear LE White List */
+ hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
+
+ /* LE-only controllers have LE implicitly enabled */
+ if (!lmp_bredr_capable(hdev))
+ set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
+}
+
+static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
+{
+ if (lmp_ext_inq_capable(hdev))
+ return 0x02;
+
+ if (lmp_inq_rssi_capable(hdev))
+ return 0x01;
+
+ if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
+ hdev->lmp_subver == 0x0757)
+ return 0x01;
+
+ if (hdev->manufacturer == 15) {
+ if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
+ return 0x01;
+ if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
+ return 0x01;
+ if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
+ return 0x01;
+ }
+
+ if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
+ hdev->lmp_subver == 0x1805)
+ return 0x01;
+
+ return 0x00;
+}
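
The return value feeds HCI_OP_WRITE_INQUIRY_MODE below; for reference, the core-spec Inquiry_Mode values are:

/* Inquiry_Mode parameter:
 *   0x00 - standard inquiry result format
 *   0x01 - inquiry result format with RSSI
 *   0x02 - extended inquiry result (EIR) format
 * The manufacturer/hci_rev/lmp_subver checks above appear to special-case
 * a few older controllers that deliver RSSI results without setting the
 * corresponding LMP feature bit.
 */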
+
+static void hci_setup_inquiry_mode(struct hci_request *req)
+{
+ u8 mode;
+
+ mode = hci_get_inquiry_mode(req->hdev);
+
+ hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
+}
+
+static void hci_setup_event_mask(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+
+ /* The second byte is 0xff instead of 0x9f (two reserved bits
+ * disabled) since a Broadcom 1.2 dongle doesn't respond to the
+ * command otherwise.
+ */
+ u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
+
+ /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
+ * any event mask for pre-1.2 devices.
+ */
+ if (hdev->hci_ver < BLUETOOTH_VER_1_2)
+ return;
+
+ if (lmp_bredr_capable(hdev)) {
+ events[4] |= 0x01; /* Flow Specification Complete */
+ events[4] |= 0x02; /* Inquiry Result with RSSI */
+ events[4] |= 0x04; /* Read Remote Extended Features Complete */
+ events[5] |= 0x08; /* Synchronous Connection Complete */
+ events[5] |= 0x10; /* Synchronous Connection Changed */
+ } else {
+ /* Use a different default for LE-only devices */
+ memset(events, 0, sizeof(events));
+ events[0] |= 0x10; /* Disconnection Complete */
+ events[0] |= 0x80; /* Encryption Change */
+ events[1] |= 0x08; /* Read Remote Version Information Complete */
+ events[1] |= 0x20; /* Command Complete */
+ events[1] |= 0x40; /* Command Status */
+ events[1] |= 0x80; /* Hardware Error */
+ events[2] |= 0x04; /* Number of Completed Packets */
+ events[3] |= 0x02; /* Data Buffer Overflow */
+ events[5] |= 0x80; /* Encryption Key Refresh Complete */
+ }
+
+ if (lmp_inq_rssi_capable(hdev))
+ events[4] |= 0x02; /* Inquiry Result with RSSI */
+
+ if (lmp_sniffsubr_capable(hdev))
+ events[5] |= 0x20; /* Sniff Subrating */
+
+ if (lmp_pause_enc_capable(hdev))
+ events[5] |= 0x80; /* Encryption Key Refresh Complete */
+
+ if (lmp_ext_inq_capable(hdev))
+ events[5] |= 0x40; /* Extended Inquiry Result */
+
+ if (lmp_no_flush_capable(hdev))
+ events[7] |= 0x01; /* Enhanced Flush Complete */
+
+ if (lmp_lsto_capable(hdev))
+ events[6] |= 0x80; /* Link Supervision Timeout Changed */
+
+ if (lmp_ssp_capable(hdev)) {
+ events[6] |= 0x01; /* IO Capability Request */
+ events[6] |= 0x02; /* IO Capability Response */
+ events[6] |= 0x04; /* User Confirmation Request */
+ events[6] |= 0x08; /* User Passkey Request */
+ events[6] |= 0x10; /* Remote OOB Data Request */
+ events[6] |= 0x20; /* Simple Pairing Complete */
+ events[7] |= 0x04; /* User Passkey Notification */
+ events[7] |= 0x08; /* Keypress Notification */
+ events[7] |= 0x10; /* Remote Host Supported
+ * Features Notification
+ */
+ }
+
+ if (lmp_le_capable(hdev))
+ events[7] |= 0x20; /* LE Meta-Event */
+
+ hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
+
+ if (lmp_le_capable(hdev)) {
+ memset(events, 0, sizeof(events));
+ events[0] = 0x1f;
+ hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
+ sizeof(events), events);
+ }
+}
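
Each comment above names the HCI event whose mask bit is being set; byte and bit position follow directly from the bit index. A tiny helper sketch equivalent to the manual masking (illustrative, not from the patch):

static inline void example_set_event_bit(u8 events[8], unsigned int bit)
{
        events[bit / 8] |= 1 << (bit % 8);
}

/* e.g. example_set_event_bit(events, 33) is events[4] |= 0x02, the
 * "Inquiry Result with RSSI" bit per the comment above.
 */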
+
+static void hci_init2_req(struct hci_request *req, unsigned long opt)
+{
+ struct hci_dev *hdev = req->hdev;
+
+ if (lmp_bredr_capable(hdev))
+ bredr_setup(req);
+ else
+ clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+
+ if (lmp_le_capable(hdev))
+ le_setup(req);
+
+ hci_setup_event_mask(req);
+
+ /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
+ * local supported commands HCI command.
+ */
+ if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
+ hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
+
+ if (lmp_ssp_capable(hdev)) {
+ /* When SSP is available, then the host features page
+ * should also be available as well. However some
+ * controllers list the max_page as 0 as long as SSP
+ * has not been enabled. To achieve proper debugging
+ * output, force the minimum max_page to 1 at least.
+ */
+ hdev->max_page = 0x01;
+
+ if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
+ u8 mode = 0x01;
+ hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
+ sizeof(mode), &mode);
+ } else {
+ struct hci_cp_write_eir cp;
+
+ memset(hdev->eir, 0, sizeof(hdev->eir));
+ memset(&cp, 0, sizeof(cp));
+
+ hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
+ }
+ }
+
+ if (lmp_inq_rssi_capable(hdev))
+ hci_setup_inquiry_mode(req);
+
+ if (lmp_inq_tx_pwr_capable(hdev))
+ hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
+
+ if (lmp_ext_feat_capable(hdev)) {
+ struct hci_cp_read_local_ext_features cp;
+
+ cp.page = 0x01;
+ hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
+ sizeof(cp), &cp);
+ }
+
+ if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
+ u8 enable = 1;
+ hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
+ &enable);
+ }
+}
+
+static void hci_setup_link_policy(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct hci_cp_write_def_link_policy cp;
+ u16 link_policy = 0;
+
+ if (lmp_rswitch_capable(hdev))
+ link_policy |= HCI_LP_RSWITCH;
+ if (lmp_hold_capable(hdev))
+ link_policy |= HCI_LP_HOLD;
+ if (lmp_sniff_capable(hdev))
+ link_policy |= HCI_LP_SNIFF;
+ if (lmp_park_capable(hdev))
+ link_policy |= HCI_LP_PARK;
+
+ cp.policy = cpu_to_le16(link_policy);
+ hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
+}
+
+static void hci_set_le_support(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct hci_cp_write_le_host_supported cp;
+
+ /* LE-only devices do not support explicit enablement */
+ if (!lmp_bredr_capable(hdev))
+ return;
+
+ memset(&cp, 0, sizeof(cp));
+
+ if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+ cp.le = 0x01;
+ cp.simul = lmp_le_br_capable(hdev);
+ }
+
+ if (cp.le != lmp_host_le_capable(hdev))
+ hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
+ &cp);
+}
+
+static void hci_set_event_mask_page_2(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+ /* If Connectionless Slave Broadcast master role is supported,
+ * enable all necessary events for it.
+ */
+ if (lmp_csb_master_capable(hdev)) {
+ events[1] |= 0x40; /* Triggered Clock Capture */
+ events[1] |= 0x80; /* Synchronization Train Complete */
+ events[2] |= 0x10; /* Slave Page Response Timeout */
+ events[2] |= 0x20; /* CSB Channel Map Change */
+ }
- hdev->req_last_cmd = HCI_OP_WRITE_CA_TIMEOUT;
+ /* If Connectionless Slave Broadcast slave role is supported,
+ * enable all necessary events for it.
+ */
+ if (lmp_csb_slave_capable(hdev)) {
+ events[2] |= 0x01; /* Synchronization Train Received */
+ events[2] |= 0x02; /* CSB Receive */
+ events[2] |= 0x04; /* CSB Timeout */
+ events[2] |= 0x08; /* Truncated Page Complete */
+ }
+
+ /* Enable Authenticated Payload Timeout Expired event if supported */
+ if (lmp_ping_capable(hdev))
+ events[2] |= 0x80;
+
+ hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
+}
+
+static void hci_init3_req(struct hci_request *req, unsigned long opt)
+{
+ struct hci_dev *hdev = req->hdev;
+ u8 p;
+
+ /* Some Broadcom based Bluetooth controllers do not support the
+ * Delete Stored Link Key command. They are clearly indicating its
+ * absence in the bit mask of supported commands.
+ *
+ * Check the supported commands and send the command only if it is
+ * marked as supported. If not supported, assume that the controller
+ * does not have actual support for stored link keys which makes this
+ * command redundant anyway.
+ *
+ * Some controllers indicate that they support deleting stored
+ * link keys, but they don't. The quirk lets a driver
+ * just disable this command.
+ */
+ if (hdev->commands[6] & 0x80 &&
+ !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
+ struct hci_cp_delete_stored_link_key cp;
+
+ bacpy(&cp.bdaddr, BDADDR_ANY);
+ cp.delete_all = 0x01;
+ hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
+ sizeof(cp), &cp);
+ }
+
+ if (hdev->commands[5] & 0x10)
+ hci_setup_link_policy(req);
+
+ if (lmp_le_capable(hdev))
+ hci_set_le_support(req);
+
+ /* Read features beyond page 1 if available */
+ for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
+ struct hci_cp_read_local_ext_features cp;
+
+ cp.page = p;
+ hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
+ sizeof(cp), &cp);
+ }
+}
+
+static void hci_init4_req(struct hci_request *req, unsigned long opt)
+{
+ struct hci_dev *hdev = req->hdev;
+
+ /* Set event mask page 2 if the HCI command for it is supported */
+ if (hdev->commands[22] & 0x04)
+ hci_set_event_mask_page_2(req);
+
+ /* Check for Synchronization Train support */
+ if (lmp_sync_train_capable(hdev))
+ hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
+
+ /* Enable Secure Connections if supported and configured */
+ if ((lmp_sc_capable(hdev) ||
+ test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
+ test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
+ u8 support = 0x01;
+ hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
+ sizeof(support), &support);
+ }
+}
+
+static int __hci_init(struct hci_dev *hdev)
+{
+ int err;
+
+ err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
+ if (err < 0)
+ return err;
+
+ /* The Device Under Test (DUT) mode is special and available for
+ * all controller types. So just create it early on.
+ */
+ if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
+ debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
+ &dut_mode_fops);
+ }
+
+ /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and dual-mode
+ * BR/EDR/LE type controllers. AMP controllers only need the
+ * first stage init.
+ */
+ if (hdev->dev_type != HCI_BREDR)
+ return 0;
+
+ err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
+ if (err < 0)
+ return err;
+
+ err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
+ if (err < 0)
+ return err;
+
+ err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
+ if (err < 0)
+ return err;
+
+ /* Only create debugfs entries during the initial setup
+ * phase and not every time the controller gets powered on.
+ */
+ if (!test_bit(HCI_SETUP, &hdev->dev_flags))
+ return 0;
+
+ debugfs_create_file("features", 0444, hdev->debugfs, hdev,
+ &features_fops);
+ debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
+ &hdev->manufacturer);
+ debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
+ debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
+ debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
+ &blacklist_fops);
+ debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
+
+ debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
+ &conn_info_min_age_fops);
+ debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
+ &conn_info_max_age_fops);
+
+ if (lmp_bredr_capable(hdev)) {
+ debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
+ hdev, &inquiry_cache_fops);
+ debugfs_create_file("link_keys", 0400, hdev->debugfs,
+ hdev, &link_keys_fops);
+ debugfs_create_file("dev_class", 0444, hdev->debugfs,
+ hdev, &dev_class_fops);
+ debugfs_create_file("voice_setting", 0444, hdev->debugfs,
+ hdev, &voice_setting_fops);
+ }
+
+ if (lmp_ssp_capable(hdev)) {
+ debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
+ hdev, &auto_accept_delay_fops);
+ debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
+ hdev, &ssp_debug_mode_fops);
+ debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
+ hdev, &force_sc_support_fops);
+ debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
+ hdev, &sc_only_mode_fops);
+ }
+
+ if (lmp_sniff_capable(hdev)) {
+ debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
+ hdev, &idle_timeout_fops);
+ debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
+ hdev, &sniff_min_interval_fops);
+ debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
+ hdev, &sniff_max_interval_fops);
+ }
+
+ if (lmp_le_capable(hdev)) {
+ debugfs_create_file("identity", 0400, hdev->debugfs,
+ hdev, &identity_fops);
+ debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
+ hdev, &rpa_timeout_fops);
+ debugfs_create_file("random_address", 0444, hdev->debugfs,
+ hdev, &random_address_fops);
+ debugfs_create_file("static_address", 0444, hdev->debugfs,
+ hdev, &static_address_fops);
+
+ /* For controllers with a public address, provide a debug
+ * option to force the usage of the configured static
+ * address. By default the public address is used.
+ */
+ if (bacmp(&hdev->bdaddr, BDADDR_ANY))
+ debugfs_create_file("force_static_address", 0644,
+ hdev->debugfs, hdev,
+ &force_static_address_fops);
+
+ debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
+ &hdev->le_white_list_size);
+ debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
+ &white_list_fops);
+ debugfs_create_file("identity_resolving_keys", 0400,
+ hdev->debugfs, hdev,
+ &identity_resolving_keys_fops);
+ debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
+ hdev, &long_term_keys_fops);
+ debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
+ hdev, &conn_min_interval_fops);
+ debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
+ hdev, &conn_max_interval_fops);
+ debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
+ hdev, &adv_channel_map_fops);
+ debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
+ &lowpan_debugfs_fops);
+ debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
+ &le_auto_conn_fops);
+ debugfs_create_u16("discov_interleaved_timeout", 0644,
+ hdev->debugfs,
+ &hdev->discov_interleaved_timeout);
+ }
+
+ return 0;
}
-static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
+static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
__u8 scan = opt;
- BT_DBG("%s %x", hdev->name, scan);
+ BT_DBG("%s %x", req->hdev->name, scan);
/* Inquiry and Page scans */
- hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+ hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
-static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
+static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
__u8 auth = opt;
- BT_DBG("%s %x", hdev->name, auth);
+ BT_DBG("%s %x", req->hdev->name, auth);
/* Authentication */
- hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
+ hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
-static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
+static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
__u8 encrypt = opt;
- BT_DBG("%s %x", hdev->name, encrypt);
+ BT_DBG("%s %x", req->hdev->name, encrypt);
/* Encryption */
- hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
+ hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
-static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
+static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
__le16 policy = cpu_to_le16(opt);
- BT_DBG("%s %x", hdev->name, policy);
+ BT_DBG("%s %x", req->hdev->name, policy);
/* Default link policy */
- hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
+ hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
* Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
- struct hci_dev *hdev = NULL;
- struct list_head *p;
+ struct hci_dev *hdev = NULL, *d;
BT_DBG("%d", index);
@@ -316,8 +1945,7 @@ struct hci_dev *hci_dev_get(int index)
return NULL;
read_lock(&hci_dev_list_lock);
- list_for_each(p, &hci_dev_list) {
- struct hci_dev *d = list_entry(p, struct hci_dev, list);
+ list_for_each_entry(d, &hci_dev_list, list) {
if (d->id == index) {
hdev = hci_dev_hold(d);
break;
@@ -328,81 +1956,222 @@ struct hci_dev *hci_dev_get(int index)
}
/* ---- Inquiry support ---- */
-static void inquiry_cache_flush(struct hci_dev *hdev)
+
+bool hci_discovery_active(struct hci_dev *hdev)
{
- struct inquiry_cache *cache = &hdev->inq_cache;
- struct inquiry_entry *next = cache->list, *e;
+ struct discovery_state *discov = &hdev->discovery;
- BT_DBG("cache %p", cache);
+ switch (discov->state) {
+ case DISCOVERY_FINDING:
+ case DISCOVERY_RESOLVING:
+ return true;
- cache->list = NULL;
- while ((e = next)) {
- next = e->next;
- kfree(e);
+ default:
+ return false;
}
}
-struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
+void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
- struct inquiry_cache *cache = &hdev->inq_cache;
+ BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
+
+ if (hdev->discovery.state == state)
+ return;
+
+ switch (state) {
+ case DISCOVERY_STOPPED:
+ hci_update_background_scan(hdev);
+
+ if (hdev->discovery.state != DISCOVERY_STARTING)
+ mgmt_discovering(hdev, 0);
+ break;
+ case DISCOVERY_STARTING:
+ break;
+ case DISCOVERY_FINDING:
+ mgmt_discovering(hdev, 1);
+ break;
+ case DISCOVERY_RESOLVING:
+ break;
+ case DISCOVERY_STOPPING:
+ break;
+ }
+
+ hdev->discovery.state = state;
+}
+
+void hci_inquiry_cache_flush(struct hci_dev *hdev)
+{
+ struct discovery_state *cache = &hdev->discovery;
+ struct inquiry_entry *p, *n;
+
+ list_for_each_entry_safe(p, n, &cache->all, all) {
+ list_del(&p->all);
+ kfree(p);
+ }
+
+ INIT_LIST_HEAD(&cache->unknown);
+ INIT_LIST_HEAD(&cache->resolve);
+}
+
+struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
+ bdaddr_t *bdaddr)
+{
+ struct discovery_state *cache = &hdev->discovery;
struct inquiry_entry *e;
- BT_DBG("cache %p, %s", cache, batostr(bdaddr));
+ BT_DBG("cache %p, %pMR", cache, bdaddr);
- for (e = cache->list; e; e = e->next)
+ list_for_each_entry(e, &cache->all, all) {
if (!bacmp(&e->data.bdaddr, bdaddr))
+ return e;
+ }
+
+ return NULL;
+}
+
+struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
+ bdaddr_t *bdaddr)
+{
+ struct discovery_state *cache = &hdev->discovery;
+ struct inquiry_entry *e;
+
+ BT_DBG("cache %p, %pMR", cache, bdaddr);
+
+ list_for_each_entry(e, &cache->unknown, list) {
+ if (!bacmp(&e->data.bdaddr, bdaddr))
+ return e;
+ }
+
+ return NULL;
+}
+
+struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
+ bdaddr_t *bdaddr,
+ int state)
+{
+ struct discovery_state *cache = &hdev->discovery;
+ struct inquiry_entry *e;
+
+ BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
+
+ list_for_each_entry(e, &cache->resolve, list) {
+ if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
+ return e;
+ if (!bacmp(&e->data.bdaddr, bdaddr))
+ return e;
+ }
+
+ return NULL;
+}
+
+void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
+ struct inquiry_entry *ie)
+{
+ struct discovery_state *cache = &hdev->discovery;
+ struct list_head *pos = &cache->resolve;
+ struct inquiry_entry *p;
+
+ list_del(&ie->list);
+
+ list_for_each_entry(p, &cache->resolve, list) {
+ if (p->name_state != NAME_PENDING &&
+ abs(p->data.rssi) >= abs(ie->data.rssi))
break;
- return e;
+ pos = &p->list;
+ }
+
+ list_add(&ie->list, pos);
}
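
/* Ordering note: RSSI is a negative dBm value, so a smaller |rssi|
 * means a stronger signal. The loop above skips entries whose name
 * request is already pending and stops at the first entry at least as
 * weak as the new one, keeping ->resolve sorted best-signal-first.
 */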
-void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
+bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
+ bool name_known, bool *ssp)
{
- struct inquiry_cache *cache = &hdev->inq_cache;
+ struct discovery_state *cache = &hdev->discovery;
struct inquiry_entry *ie;
- BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
+ BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
+
+ hci_remove_remote_oob_data(hdev, &data->bdaddr);
+
+ *ssp = data->ssp_mode;
ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
- if (!ie) {
- /* Entry not in the cache. Add new one. */
- ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
- if (!ie)
- return;
+ if (ie) {
+ if (ie->data.ssp_mode)
+ *ssp = true;
+
+ if (ie->name_state == NAME_NEEDED &&
+ data->rssi != ie->data.rssi) {
+ ie->data.rssi = data->rssi;
+ hci_inquiry_cache_update_resolve(hdev, ie);
+ }
- ie->next = cache->list;
- cache->list = ie;
+ goto update;
+ }
+
+ /* Entry not in the cache. Add new one. */
+ ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
+ if (!ie)
+ return false;
+
+ list_add(&ie->all, &cache->all);
+
+ if (name_known) {
+ ie->name_state = NAME_KNOWN;
+ } else {
+ ie->name_state = NAME_NOT_KNOWN;
+ list_add(&ie->list, &cache->unknown);
+ }
+
+update:
+ if (name_known && ie->name_state != NAME_KNOWN &&
+ ie->name_state != NAME_PENDING) {
+ ie->name_state = NAME_KNOWN;
+ list_del(&ie->list);
}
memcpy(&ie->data, data, sizeof(*data));
ie->timestamp = jiffies;
cache->timestamp = jiffies;
+
+ if (ie->name_state == NAME_NOT_KNOWN)
+ return false;
+
+ return true;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
- struct inquiry_cache *cache = &hdev->inq_cache;
+ struct discovery_state *cache = &hdev->discovery;
struct inquiry_info *info = (struct inquiry_info *) buf;
struct inquiry_entry *e;
int copied = 0;
- for (e = cache->list; e && copied < num; e = e->next, copied++) {
+ list_for_each_entry(e, &cache->all, all) {
struct inquiry_data *data = &e->data;
+
+ if (copied >= num)
+ break;
+
bacpy(&info->bdaddr, &data->bdaddr);
info->pscan_rep_mode = data->pscan_rep_mode;
info->pscan_period_mode = data->pscan_period_mode;
info->pscan_mode = data->pscan_mode;
memcpy(info->dev_class, data->dev_class, 3);
info->clock_offset = data->clock_offset;
+
info++;
+ copied++;
}
BT_DBG("cache %p, copied %d", cache, copied);
return copied;
}
-static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
+static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
+ struct hci_dev *hdev = req->hdev;
struct hci_cp_inquiry cp;
BT_DBG("%s", hdev->name);
@@ -414,7 +2183,13 @@ static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
memcpy(&cp.lap, &ir->lap, 3);
cp.length = ir->length;
cp.num_rsp = ir->num_rsp;
- hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
+ hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
+}
+
+static int wait_inquiry(void *word)
+{
+ schedule();
+ return signal_pending(current);
}
int hci_inquiry(void __user *arg)
@@ -429,48 +2204,73 @@ int hci_inquiry(void __user *arg)
if (copy_from_user(&ir, ptr, sizeof(ir)))
return -EFAULT;
- if (!(hdev = hci_dev_get(ir.dev_id)))
+ hdev = hci_dev_get(ir.dev_id);
+ if (!hdev)
return -ENODEV;
- hci_dev_lock_bh(hdev);
+ if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+ err = -EBUSY;
+ goto done;
+ }
+
+ if (hdev->dev_type != HCI_BREDR) {
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+
+ if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+
+ hci_dev_lock(hdev);
if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
- inquiry_cache_empty(hdev) ||
- ir.flags & IREQ_CACHE_FLUSH) {
- inquiry_cache_flush(hdev);
+ inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
+ hci_inquiry_cache_flush(hdev);
do_inquiry = 1;
}
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
timeo = ir.length * msecs_to_jiffies(2000);
if (do_inquiry) {
- err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
+ err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
+ timeo);
if (err < 0)
goto done;
+
+ /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
+ * cleared). If it is interrupted by a signal, return -EINTR.
+ */
+ if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
+ TASK_INTERRUPTIBLE))
+ return -EINTR;
}
- /* for unlimited number of responses we will use buffer with 255 entries */
+ /* For an unlimited number of responses we will use a buffer with
+ * 255 entries.
+ */
max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
/* cache_dump can't sleep. Therefore we allocate temp buffer and then
* copy it to the user space.
*/
- buf = kmalloc(sizeof(struct inquiry_info) *max_rsp, GFP_KERNEL);
+ buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
goto done;
}
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
BT_DBG("num_rsp %d", ir.num_rsp);
if (!copy_to_user(ptr, &ir, sizeof(ir))) {
ptr += sizeof(ir);
if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
- ir.num_rsp))
+ ir.num_rsp))
err = -EFAULT;
} else
err = -EFAULT;
@@ -482,62 +2282,93 @@ done:
return err;
}
-/* ---- HCI ioctl helpers ---- */
-
-int hci_dev_open(__u16 dev)
+static int hci_dev_do_open(struct hci_dev *hdev)
{
- struct hci_dev *hdev;
int ret = 0;
- if (!(hdev = hci_dev_get(dev)))
- return -ENODEV;
-
BT_DBG("%s %p", hdev->name, hdev);
hci_req_lock(hdev);
- if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
- ret = -ERFKILL;
+ if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
+ ret = -ENODEV;
goto done;
}
+ if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
+ /* Check for rfkill but allow the HCI setup stage to
+ * proceed (which in itself doesn't cause any RF activity).
+ */
+ if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
+ ret = -ERFKILL;
+ goto done;
+ }
+
+ /* Check for valid public address or a configured static
+ * random address, but let the HCI setup proceed to
+ * be able to determine if there is a public address
+ * or not.
+ *
+ * In case of user channel usage, it is not important
+ * if a public address or static random address is
+ * available.
+ *
+ * This check is only valid for BR/EDR controllers
+ * since AMP controllers do not have an address.
+ */
+ if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
+ hdev->dev_type == HCI_BREDR &&
+ !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
+ !bacmp(&hdev->static_addr, BDADDR_ANY)) {
+ ret = -EADDRNOTAVAIL;
+ goto done;
+ }
+ }
+
if (test_bit(HCI_UP, &hdev->flags)) {
ret = -EALREADY;
goto done;
}
- if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
- set_bit(HCI_RAW, &hdev->flags);
-
- /* Treat all non BR/EDR controllers as raw devices for now */
- if (hdev->dev_type != HCI_BREDR)
- set_bit(HCI_RAW, &hdev->flags);
-
if (hdev->open(hdev)) {
ret = -EIO;
goto done;
}
- if (!test_bit(HCI_RAW, &hdev->flags)) {
- atomic_set(&hdev->cmd_cnt, 1);
- set_bit(HCI_INIT, &hdev->flags);
+ atomic_set(&hdev->cmd_cnt, 1);
+ set_bit(HCI_INIT, &hdev->flags);
- //__hci_request(hdev, hci_reset_req, 0, HZ);
- ret = __hci_request(hdev, hci_init_req, 0,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
+ ret = hdev->setup(hdev);
- clear_bit(HCI_INIT, &hdev->flags);
+ if (!ret) {
+ if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
+ set_bit(HCI_RAW, &hdev->flags);
+
+ if (!test_bit(HCI_RAW, &hdev->flags) &&
+ !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
+ ret = __hci_init(hdev);
}
+ clear_bit(HCI_INIT, &hdev->flags);
+
if (!ret) {
hci_dev_hold(hdev);
+ set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
set_bit(HCI_UP, &hdev->flags);
hci_notify(hdev, HCI_DEV_UP);
+ if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
+ !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
+ hdev->dev_type == HCI_BREDR) {
+ hci_dev_lock(hdev);
+ mgmt_powered(hdev, 1);
+ hci_dev_unlock(hdev);
+ }
} else {
/* Init failed, cleanup */
- tasklet_kill(&hdev->rx_task);
- tasklet_kill(&hdev->tx_task);
- tasklet_kill(&hdev->cmd_task);
+ flush_work(&hdev->tx_work);
+ flush_work(&hdev->cmd_work);
+ flush_work(&hdev->rx_work);
skb_queue_purge(&hdev->cmd_q);
skb_queue_purge(&hdev->rx_q);
@@ -556,30 +2387,80 @@ int hci_dev_open(__u16 dev)
done:
hci_req_unlock(hdev);
- hci_dev_put(hdev);
return ret;
}
+/* ---- HCI ioctl helpers ---- */
+
+int hci_dev_open(__u16 dev)
+{
+ struct hci_dev *hdev;
+ int err;
+
+ hdev = hci_dev_get(dev);
+ if (!hdev)
+ return -ENODEV;
+
+ /* We need to ensure that no other power on/off work is pending
+ * before proceeding to call hci_dev_do_open. This is
+ * particularly important if the setup procedure has not yet
+ * completed.
+ */
+ if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+ cancel_delayed_work(&hdev->power_off);
+
+ /* After this call it is guaranteed that the setup procedure
+ * has finished. This means that error conditions like RFKILL
+ * or no valid public or static random address apply.
+ */
+ flush_workqueue(hdev->req_workqueue);
+
+ err = hci_dev_do_open(hdev);
+
+ hci_dev_put(hdev);
+
+ return err;
+}
+
static int hci_dev_do_close(struct hci_dev *hdev)
{
BT_DBG("%s %p", hdev->name, hdev);
+ cancel_delayed_work(&hdev->power_off);
+
hci_req_cancel(hdev, ENODEV);
hci_req_lock(hdev);
if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
+ del_timer_sync(&hdev->cmd_timer);
hci_req_unlock(hdev);
return 0;
}
- /* Kill RX and TX tasks */
- tasklet_kill(&hdev->rx_task);
- tasklet_kill(&hdev->tx_task);
+ /* Flush RX and TX works */
+ flush_work(&hdev->tx_work);
+ flush_work(&hdev->rx_work);
+
+ if (hdev->discov_timeout > 0) {
+ cancel_delayed_work(&hdev->discov_off);
+ hdev->discov_timeout = 0;
+ clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+ clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+ }
+
+ if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
+ cancel_delayed_work(&hdev->service_cache);
- hci_dev_lock_bh(hdev);
- inquiry_cache_flush(hdev);
+ cancel_delayed_work_sync(&hdev->le_scan_disable);
+
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ cancel_delayed_work_sync(&hdev->rpa_expired);
+
+ hci_dev_lock(hdev);
+ hci_inquiry_cache_flush(hdev);
hci_conn_hash_flush(hdev);
- hci_dev_unlock_bh(hdev);
+ hci_pend_le_conns_clear(hdev);
+ hci_dev_unlock(hdev);
hci_notify(hdev, HCI_DEV_DOWN);
@@ -589,15 +2470,16 @@ static int hci_dev_do_close(struct hci_dev *hdev)
/* Reset device */
skb_queue_purge(&hdev->cmd_q);
atomic_set(&hdev->cmd_cnt, 1);
- if (!test_bit(HCI_RAW, &hdev->flags)) {
+ if (!test_bit(HCI_RAW, &hdev->flags) &&
+ !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
+ test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
set_bit(HCI_INIT, &hdev->flags);
- __hci_request(hdev, hci_reset_req, 0,
- msecs_to_jiffies(250));
+ __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
clear_bit(HCI_INIT, &hdev->flags);
}
- /* Kill cmd task */
- tasklet_kill(&hdev->cmd_task);
+ /* flush cmd work */
+ flush_work(&hdev->cmd_work);
/* Drop queues */
skb_queue_purge(&hdev->rx_q);
@@ -606,16 +2488,36 @@ static int hci_dev_do_close(struct hci_dev *hdev)
/* Drop last sent command */
if (hdev->sent_cmd) {
+ del_timer_sync(&hdev->cmd_timer);
kfree_skb(hdev->sent_cmd);
hdev->sent_cmd = NULL;
}
+ kfree_skb(hdev->recv_evt);
+ hdev->recv_evt = NULL;
+
/* After this point our queues are empty
* and no tasks are scheduled. */
hdev->close(hdev);
/* Clear flags */
hdev->flags = 0;
+ hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
+
+ if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
+ if (hdev->dev_type == HCI_BREDR) {
+ hci_dev_lock(hdev);
+ mgmt_powered(hdev, 0);
+ hci_dev_unlock(hdev);
+ }
+ }
+
+ /* Controller radio is available but is currently powered down */
+ hdev->amp_status = AMP_STATUS_POWERED_DOWN;
+
+ memset(hdev->eir, 0, sizeof(hdev->eir));
+ memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
+ bacpy(&hdev->random_addr, BDADDR_ANY);
hci_req_unlock(hdev);
@@ -631,7 +2533,18 @@ int hci_dev_close(__u16 dev)
hdev = hci_dev_get(dev);
if (!hdev)
return -ENODEV;
+
+ if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+ err = -EBUSY;
+ goto done;
+ }
+
+ if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+ cancel_delayed_work(&hdev->power_off);
+
err = hci_dev_do_close(hdev);
+
+done:
hci_dev_put(hdev);
return err;
}
@@ -646,32 +2559,36 @@ int hci_dev_reset(__u16 dev)
return -ENODEV;
hci_req_lock(hdev);
- tasklet_disable(&hdev->tx_task);
- if (!test_bit(HCI_UP, &hdev->flags))
+ if (!test_bit(HCI_UP, &hdev->flags)) {
+ ret = -ENETDOWN;
+ goto done;
+ }
+
+ if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+ ret = -EBUSY;
goto done;
+ }
/* Drop queues */
skb_queue_purge(&hdev->rx_q);
skb_queue_purge(&hdev->cmd_q);
- hci_dev_lock_bh(hdev);
- inquiry_cache_flush(hdev);
+ hci_dev_lock(hdev);
+ hci_inquiry_cache_flush(hdev);
hci_conn_hash_flush(hdev);
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
if (hdev->flush)
hdev->flush(hdev);
atomic_set(&hdev->cmd_cnt, 1);
- hdev->acl_cnt = 0; hdev->sco_cnt = 0;
+ hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
if (!test_bit(HCI_RAW, &hdev->flags))
- ret = __hci_request(hdev, hci_reset_req, 0,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
done:
- tasklet_enable(&hdev->tx_task);
hci_req_unlock(hdev);
hci_dev_put(hdev);
return ret;
@@ -686,10 +2603,15 @@ int hci_dev_reset_stat(__u16 dev)
if (!hdev)
return -ENODEV;
+ if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+ ret = -EBUSY;
+ goto done;
+ }
+
memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
+done:
hci_dev_put(hdev);
-
return ret;
}
@@ -706,10 +2628,25 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
if (!hdev)
return -ENODEV;
+ if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+ err = -EBUSY;
+ goto done;
+ }
+
+ if (hdev->dev_type != HCI_BREDR) {
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+
+ if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+
switch (cmd) {
case HCISETAUTH:
- err = hci_request(hdev, hci_auth_req, dr.dev_opt,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
+ HCI_INIT_TIMEOUT);
break;
case HCISETENCRYPT:
@@ -720,24 +2657,24 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
if (!test_bit(HCI_AUTH, &hdev->flags)) {
/* Auth must be enabled first */
- err = hci_request(hdev, hci_auth_req, dr.dev_opt,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
+ HCI_INIT_TIMEOUT);
if (err)
break;
}
- err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
+ HCI_INIT_TIMEOUT);
break;
case HCISETSCAN:
- err = hci_request(hdev, hci_scan_req, dr.dev_opt,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
+ HCI_INIT_TIMEOUT);
break;
case HCISETLINKPOL:
- err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
+ HCI_INIT_TIMEOUT);
break;
case HCISETLINKMODE:
@@ -764,15 +2701,16 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
break;
}
+done:
hci_dev_put(hdev);
return err;
}
int hci_get_dev_list(void __user *arg)
{
+ struct hci_dev *hdev;
struct hci_dev_list_req *dl;
struct hci_dev_req *dr;
- struct list_head *p;
int n = 0, size, err;
__u16 dev_num;
@@ -790,16 +2728,21 @@ int hci_get_dev_list(void __user *arg)
dr = dl->dev_req;
- read_lock_bh(&hci_dev_list_lock);
- list_for_each(p, &hci_dev_list) {
- struct hci_dev *hdev;
- hdev = list_entry(p, struct hci_dev, list);
+ read_lock(&hci_dev_list_lock);
+ list_for_each_entry(hdev, &hci_dev_list, list) {
+ if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+ cancel_delayed_work(&hdev->power_off);
+
+ if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ set_bit(HCI_PAIRABLE, &hdev->dev_flags);
+
(dr + n)->dev_id = hdev->id;
(dr + n)->dev_opt = hdev->flags;
+
if (++n >= dev_num)
break;
}
- read_unlock_bh(&hci_dev_list_lock);
+ read_unlock(&hci_dev_list_lock);
dl->dev_num = n;
size = sizeof(*dl) + n * sizeof(*dr);
@@ -823,15 +2766,28 @@ int hci_get_dev_info(void __user *arg)
if (!hdev)
return -ENODEV;
+ if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+ cancel_delayed_work_sync(&hdev->power_off);
+
+ if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ set_bit(HCI_PAIRABLE, &hdev->dev_flags);
+
strcpy(di.name, hdev->name);
di.bdaddr = hdev->bdaddr;
- di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
+ di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
di.flags = hdev->flags;
di.pkt_type = hdev->pkt_type;
- di.acl_mtu = hdev->acl_mtu;
- di.acl_pkts = hdev->acl_pkts;
- di.sco_mtu = hdev->sco_mtu;
- di.sco_pkts = hdev->sco_pkts;
+ if (lmp_bredr_capable(hdev)) {
+ di.acl_mtu = hdev->acl_mtu;
+ di.acl_pkts = hdev->acl_pkts;
+ di.sco_mtu = hdev->sco_mtu;
+ di.sco_pkts = hdev->sco_pkts;
+ } else {
+ di.acl_mtu = hdev->le_mtu;
+ di.acl_pkts = hdev->le_pkts;
+ di.sco_mtu = 0;
+ di.sco_pkts = 0;
+ }
di.link_policy = hdev->link_policy;
di.link_mode = hdev->link_mode;
@@ -854,10 +2810,16 @@ static int hci_rfkill_set_block(void *data, bool blocked)
BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
- if (!blocked)
- return 0;
+ if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
+ return -EBUSY;
- hci_dev_do_close(hdev);
+ if (blocked) {
+ set_bit(HCI_RFKILLED, &hdev->dev_flags);
+ if (!test_bit(HCI_SETUP, &hdev->dev_flags))
+ hci_dev_do_close(hdev);
+ } else {
+ clear_bit(HCI_RFKILLED, &hdev->dev_flags);
+ }
return 0;
}
@@ -866,102 +2828,1145 @@ static const struct rfkill_ops hci_rfkill_ops = {
.set_block = hci_rfkill_set_block,
};
-/* Alloc HCI device */
-struct hci_dev *hci_alloc_dev(void)
+static void hci_power_on(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ err = hci_dev_do_open(hdev);
+ if (err < 0) {
+ mgmt_set_powered_failed(hdev, err);
+ return;
+ }
+
+ /* During the HCI setup phase, a few error conditions are
+ * ignored and they need to be checked now. If they are still
+ * valid, it is important to turn the device back off.
+ */
+ if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
+ (hdev->dev_type == HCI_BREDR &&
+ !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
+ !bacmp(&hdev->static_addr, BDADDR_ANY))) {
+ clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
+ hci_dev_do_close(hdev);
+ } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
+ queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
+ HCI_AUTO_OFF_TIMEOUT);
+ }
+
+ if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
+ mgmt_index_added(hdev);
+}
+
+static void hci_power_off(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ power_off.work);
+
+ BT_DBG("%s", hdev->name);
+
+ hci_dev_do_close(hdev);
+}
+
+static void hci_discov_off(struct work_struct *work)
{
struct hci_dev *hdev;
- hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
- if (!hdev)
+ hdev = container_of(work, struct hci_dev, discov_off.work);
+
+ BT_DBG("%s", hdev->name);
+
+ mgmt_discoverable_timeout(hdev);
+}
+
+void hci_uuids_clear(struct hci_dev *hdev)
+{
+ struct bt_uuid *uuid, *tmp;
+
+ list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
+ list_del(&uuid->list);
+ kfree(uuid);
+ }
+}
+
+void hci_link_keys_clear(struct hci_dev *hdev)
+{
+ struct list_head *p, *n;
+
+ list_for_each_safe(p, n, &hdev->link_keys) {
+ struct link_key *key;
+
+ key = list_entry(p, struct link_key, list);
+
+ list_del(p);
+ kfree(key);
+ }
+}
+
+void hci_smp_ltks_clear(struct hci_dev *hdev)
+{
+ struct smp_ltk *k, *tmp;
+
+ list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
+ list_del(&k->list);
+ kfree(k);
+ }
+}
+
+void hci_smp_irks_clear(struct hci_dev *hdev)
+{
+ struct smp_irk *k, *tmp;
+
+ list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
+ list_del(&k->list);
+ kfree(k);
+ }
+}
+
+struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+ struct link_key *k;
+
+ list_for_each_entry(k, &hdev->link_keys, list)
+ if (bacmp(bdaddr, &k->bdaddr) == 0)
+ return k;
+
+ return NULL;
+}
+
+static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
+ u8 key_type, u8 old_key_type)
+{
+ /* Legacy key */
+ if (key_type < 0x03)
+ return true;
+
+ /* Debug keys are insecure so don't store them persistently */
+ if (key_type == HCI_LK_DEBUG_COMBINATION)
+ return false;
+
+ /* Changed combination key and there's no previous one */
+ if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
+ return false;
+
+ /* Security mode 3 case */
+ if (!conn)
+ return true;
+
+ /* Neither the local nor the remote side had no-bonding as a requirement */
+ if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
+ return true;
+
+ /* Local side had dedicated bonding as requirement */
+ if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
+ return true;
+
+ /* Remote side had dedicated bonding as requirement */
+ if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
+ return true;
+
+ /* If none of the above criteria match, then don't store the key
+ * persistently */
+ return false;
+}
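
The auth_type/remote_auth comparisons above encode the HCI Authentication_Requirements values (core-spec, for reference):

/* Authentication_Requirements:
 *   0x00 / 0x01 - No Bonding        (without / with MITM protection)
 *   0x02 / 0x03 - Dedicated Bonding (without / with MITM protection)
 *   0x04 / 0x05 - General Bonding   (without / with MITM protection)
 */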
+
+static bool ltk_type_master(u8 type)
+{
+ if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
+ return true;
+
+ return false;
+}
+
+struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
+ bool master)
+{
+ struct smp_ltk *k;
+
+ list_for_each_entry(k, &hdev->long_term_keys, list) {
+ if (k->ediv != ediv || k->rand != rand)
+ continue;
+
+ if (ltk_type_master(k->type) != master)
+ continue;
+
+ return k;
+ }
+
+ return NULL;
+}
+
+struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type, bool master)
+{
+ struct smp_ltk *k;
+
+ list_for_each_entry(k, &hdev->long_term_keys, list)
+ if (addr_type == k->bdaddr_type &&
+ bacmp(bdaddr, &k->bdaddr) == 0 &&
+ ltk_type_master(k->type) == master)
+ return k;
+
+ return NULL;
+}
+
+struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
+{
+ struct smp_irk *irk;
+
+ list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
+ if (!bacmp(&irk->rpa, rpa))
+ return irk;
+ }
+
+ list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
+ if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
+ bacpy(&irk->rpa, rpa);
+ return irk;
+ }
+ }
+
+ return NULL;
+}
+
+struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type)
+{
+ struct smp_irk *irk;
+
+ /* Identity Address must be public or static random */
+ if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
return NULL;
- skb_queue_head_init(&hdev->driver_init);
+ list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
+ if (addr_type == irk->addr_type &&
+ bacmp(bdaddr, &irk->bdaddr) == 0)
+ return irk;
+ }
- return hdev;
+ return NULL;
}
-EXPORT_SYMBOL(hci_alloc_dev);
-/* Free HCI device */
-void hci_free_dev(struct hci_dev *hdev)
+int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
+ bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
- skb_queue_purge(&hdev->driver_init);
+ struct link_key *key, *old_key;
+ u8 old_key_type;
+ bool persistent;
+
+ old_key = hci_find_link_key(hdev, bdaddr);
+ if (old_key) {
+ old_key_type = old_key->type;
+ key = old_key;
+ } else {
+ old_key_type = conn ? conn->key_type : 0xff;
+ key = kzalloc(sizeof(*key), GFP_KERNEL);
+ if (!key)
+ return -ENOMEM;
+ list_add(&key->list, &hdev->link_keys);
+ }
- /* will free via device release */
- put_device(&hdev->dev);
+ BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
+
+ /* Some buggy controller combinations generate a changed
+ * combination key for legacy pairing even when there's no
+ * previous key */
+ if (type == HCI_LK_CHANGED_COMBINATION &&
+ (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
+ type = HCI_LK_COMBINATION;
+ if (conn)
+ conn->key_type = type;
+ }
+
+ bacpy(&key->bdaddr, bdaddr);
+ memcpy(key->val, val, HCI_LINK_KEY_SIZE);
+ key->pin_len = pin_len;
+
+ if (type == HCI_LK_CHANGED_COMBINATION)
+ key->type = old_key_type;
+ else
+ key->type = type;
+
+ if (!new_key)
+ return 0;
+
+ persistent = hci_persistent_key(hdev, conn, type, old_key_type);
+
+ mgmt_new_link_key(hdev, key, persistent);
+
+ if (conn)
+ conn->flush_key = !persistent;
+
+ return 0;
}
-EXPORT_SYMBOL(hci_free_dev);
-/* Register HCI device */
-int hci_register_dev(struct hci_dev *hdev)
+struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type, u8 type, u8 authenticated,
+ u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
+{
+ struct smp_ltk *key, *old_key;
+ bool master = ltk_type_master(type);
+
+ old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
+ if (old_key)
+ key = old_key;
+ else {
+ key = kzalloc(sizeof(*key), GFP_KERNEL);
+ if (!key)
+ return NULL;
+ list_add(&key->list, &hdev->long_term_keys);
+ }
+
+ bacpy(&key->bdaddr, bdaddr);
+ key->bdaddr_type = addr_type;
+ memcpy(key->val, tk, sizeof(key->val));
+ key->authenticated = authenticated;
+ key->ediv = ediv;
+ key->rand = rand;
+ key->enc_size = enc_size;
+ key->type = type;
+
+ return key;
+}
+
+struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type, u8 val[16], bdaddr_t *rpa)
+{
+ struct smp_irk *irk;
+
+ irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
+ if (!irk) {
+ irk = kzalloc(sizeof(*irk), GFP_KERNEL);
+ if (!irk)
+ return NULL;
+
+ bacpy(&irk->bdaddr, bdaddr);
+ irk->addr_type = addr_type;
+
+ list_add(&irk->list, &hdev->identity_resolving_keys);
+ }
+
+ memcpy(irk->val, val, 16);
+ bacpy(&irk->rpa, rpa);
+
+ return irk;
+}
+
+int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+ struct link_key *key;
+
+ key = hci_find_link_key(hdev, bdaddr);
+ if (!key)
+ return -ENOENT;
+
+ BT_DBG("%s removing %pMR", hdev->name, bdaddr);
+
+ list_del(&key->list);
+ kfree(key);
+
+ return 0;
+}
+
+int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
+{
+ struct smp_ltk *k, *tmp;
+ int removed = 0;
+
+ list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
+ if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
+ continue;
+
+ BT_DBG("%s removing %pMR", hdev->name, bdaddr);
+
+ list_del(&k->list);
+ kfree(k);
+ removed++;
+ }
+
+ return removed ? 0 : -ENOENT;
+}
+
+void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
+{
+ struct smp_irk *k, *tmp;
+
+ list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
+ if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
+ continue;
+
+ BT_DBG("%s removing %pMR", hdev->name, bdaddr);
+
+ list_del(&k->list);
+ kfree(k);
+ }
+}
+
+/* HCI command timer function */
+static void hci_cmd_timeout(unsigned long arg)
+{
+ struct hci_dev *hdev = (void *) arg;
+
+ if (hdev->sent_cmd) {
+ struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
+ u16 opcode = __le16_to_cpu(sent->opcode);
+
+ BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
+ } else {
+ BT_ERR("%s command tx timeout", hdev->name);
+ }
+
+ atomic_set(&hdev->cmd_cnt, 1);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
+}
+
+struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
+ bdaddr_t *bdaddr)
+{
+ struct oob_data *data;
+
+ list_for_each_entry(data, &hdev->remote_oob_data, list)
+ if (bacmp(bdaddr, &data->bdaddr) == 0)
+ return data;
+
+ return NULL;
+}
+
+int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+ struct oob_data *data;
+
+ data = hci_find_remote_oob_data(hdev, bdaddr);
+ if (!data)
+ return -ENOENT;
+
+ BT_DBG("%s removing %pMR", hdev->name, bdaddr);
+
+ list_del(&data->list);
+ kfree(data);
+
+ return 0;
+}
+
+void hci_remote_oob_data_clear(struct hci_dev *hdev)
+{
+ struct oob_data *data, *n;
+
+ list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
+ list_del(&data->list);
+ kfree(data);
+ }
+}
+
+int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 *hash, u8 *randomizer)
+{
+ struct oob_data *data;
+
+ data = hci_find_remote_oob_data(hdev, bdaddr);
+ if (!data) {
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ bacpy(&data->bdaddr, bdaddr);
+ list_add(&data->list, &hdev->remote_oob_data);
+ }
+
+ memcpy(data->hash192, hash, sizeof(data->hash192));
+ memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
+
+ memset(data->hash256, 0, sizeof(data->hash256));
+ memset(data->randomizer256, 0, sizeof(data->randomizer256));
+
+ BT_DBG("%s for %pMR", hdev->name, bdaddr);
+
+ return 0;
+}
+
+int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 *hash192, u8 *randomizer192,
+ u8 *hash256, u8 *randomizer256)
+{
+ struct oob_data *data;
+
+ data = hci_find_remote_oob_data(hdev, bdaddr);
+ if (!data) {
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ bacpy(&data->bdaddr, bdaddr);
+ list_add(&data->list, &hdev->remote_oob_data);
+ }
+
+ memcpy(data->hash192, hash192, sizeof(data->hash192));
+ memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
+
+ memcpy(data->hash256, hash256, sizeof(data->hash256));
+ memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
+
+ BT_DBG("%s for %pMR", hdev->name, bdaddr);
+
+ return 0;
+}
+
+struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
+ bdaddr_t *bdaddr, u8 type)
+{
+ struct bdaddr_list *b;
+
+ list_for_each_entry(b, &hdev->blacklist, list) {
+ if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
+ return b;
+ }
+
+ return NULL;
+}
+
+static void hci_blacklist_clear(struct hci_dev *hdev)
+{
+ struct list_head *p, *n;
+
+ list_for_each_safe(p, n, &hdev->blacklist) {
+ struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
+
+ list_del(p);
+ kfree(b);
+ }
+}
+
+int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
+{
+ struct bdaddr_list *entry;
+
+ if (!bacmp(bdaddr, BDADDR_ANY))
+ return -EBADF;
+
+ if (hci_blacklist_lookup(hdev, bdaddr, type))
+ return -EEXIST;
+
+ entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ bacpy(&entry->bdaddr, bdaddr);
+ entry->bdaddr_type = type;
+
+ list_add(&entry->list, &hdev->blacklist);
+
+ return mgmt_device_blocked(hdev, bdaddr, type);
+}
+
+int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
+{
+ struct bdaddr_list *entry;
+
+ if (!bacmp(bdaddr, BDADDR_ANY)) {
+ hci_blacklist_clear(hdev);
+ return 0;
+ }
+
+ entry = hci_blacklist_lookup(hdev, bdaddr, type);
+ if (!entry)
+ return -ENOENT;
+
+ list_del(&entry->list);
+ kfree(entry);
+
+ return mgmt_device_unblocked(hdev, bdaddr, type);
+}
+
+struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
+ bdaddr_t *bdaddr, u8 type)
+{
+ struct bdaddr_list *b;
+
+ list_for_each_entry(b, &hdev->le_white_list, list) {
+ if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
+ return b;
+ }
+
+ return NULL;
+}
+
+void hci_white_list_clear(struct hci_dev *hdev)
+{
+ struct list_head *p, *n;
+
+ list_for_each_safe(p, n, &hdev->le_white_list) {
+ struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
+
+ list_del(p);
+ kfree(b);
+ }
+}
+
+int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
- struct list_head *head = &hci_dev_list, *p;
- int i, id = 0;
+ struct bdaddr_list *entry;
+
+ if (!bacmp(bdaddr, BDADDR_ANY))
+ return -EBADF;
+
+ entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ bacpy(&entry->bdaddr, bdaddr);
+ entry->bdaddr_type = type;
+
+ list_add(&entry->list, &hdev->le_white_list);
+
+ return 0;
+}
+
+int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
+{
+ struct bdaddr_list *entry;
+
+ if (!bacmp(bdaddr, BDADDR_ANY))
+ return -EBADF;
+
+ entry = hci_white_list_lookup(hdev, bdaddr, type);
+ if (!entry)
+ return -ENOENT;
+
+ list_del(&entry->list);
+ kfree(entry);
+
+ return 0;
+}
+
+/* This function requires the caller holds hdev->lock */
+struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
+ bdaddr_t *addr, u8 addr_type)
+{
+ struct hci_conn_params *params;
+
+ list_for_each_entry(params, &hdev->le_conn_params, list) {
+ if (bacmp(&params->addr, addr) == 0 &&
+ params->addr_type == addr_type) {
+ return params;
+ }
+ }
+
+ return NULL;
+}
+
+static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
+{
+ struct hci_conn *conn;
+
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
+ if (!conn)
+ return false;
+
+ if (conn->dst_type != type)
+ return false;
- BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
- hdev->bus, hdev->owner);
+ if (conn->state != BT_CONNECTED)
+ return false;
- if (!hdev->open || !hdev->close || !hdev->destruct)
+ return true;
+}
+
+static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
+{
+ if (addr_type == ADDR_LE_DEV_PUBLIC)
+ return true;
+
+ /* Check for Random Static address type */
+ if ((addr->b[5] & 0xc0) == 0xc0)
+ return true;
+
+ return false;
+}
+
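+/* Worked example, not part of this change: for LE random addresses the
+ * two most significant bits of the most significant byte select the
+ * sub-type, so c0:11:22:33:44:55 (0xc0 & 0xc0 == 0xc0) is a static
+ * address and a valid identity address, while 4a:11:22:33:44:55
+ * (0x4a & 0xc0 == 0x40) is a resolvable private address and is not.
+ */
+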
+/* This function requires the caller holds hdev->lock */
+int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
+ u8 auto_connect, u16 conn_min_interval,
+ u16 conn_max_interval)
+{
+ struct hci_conn_params *params;
+
+ if (!is_identity_address(addr, addr_type))
return -EINVAL;
- write_lock_bh(&hci_dev_list_lock);
+ params = hci_conn_params_lookup(hdev, addr, addr_type);
+ if (params)
+ goto update;
- /* Find first available device id */
- list_for_each(p, &hci_dev_list) {
- if (list_entry(p, struct hci_dev, list)->id != id)
- break;
- head = p; id++;
+ params = kzalloc(sizeof(*params), GFP_KERNEL);
+ if (!params) {
+ BT_ERR("Out of memory");
+ return -ENOMEM;
}
- sprintf(hdev->name, "hci%d", id);
- hdev->id = id;
- list_add(&hdev->list, head);
+ bacpy(&params->addr, addr);
+ params->addr_type = addr_type;
- atomic_set(&hdev->refcnt, 1);
- spin_lock_init(&hdev->lock);
+ list_add(&params->list, &hdev->le_conn_params);
+
+update:
+ params->conn_min_interval = conn_min_interval;
+ params->conn_max_interval = conn_max_interval;
+ params->auto_connect = auto_connect;
+
+ switch (auto_connect) {
+ case HCI_AUTO_CONN_DISABLED:
+ case HCI_AUTO_CONN_LINK_LOSS:
+ hci_pend_le_conn_del(hdev, addr, addr_type);
+ break;
+ case HCI_AUTO_CONN_ALWAYS:
+ if (!is_connected(hdev, addr, addr_type))
+ hci_pend_le_conn_add(hdev, addr, addr_type);
+ break;
+ }
+
+ BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
+ "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
+ conn_min_interval, conn_max_interval);
+
+ return 0;
+}
+
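+/* Illustrative usage sketch, not part of this change: adding an
+ * auto-connect entry for a peer with a public identity address, using
+ * the default connection interval range set up in hci_alloc_dev().
+ */
+static void __maybe_unused example_conn_params(struct hci_dev *hdev,
+ bdaddr_t *peer)
+{
+ hci_dev_lock(hdev);
+ hci_conn_params_add(hdev, peer, ADDR_LE_DEV_PUBLIC,
+ HCI_AUTO_CONN_ALWAYS,
+ hdev->le_conn_min_interval,
+ hdev->le_conn_max_interval);
+ hci_dev_unlock(hdev);
+}
+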
+/* This function requires the caller holds hdev->lock */
+void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
+{
+ struct hci_conn_params *params;
+
+ params = hci_conn_params_lookup(hdev, addr, addr_type);
+ if (!params)
+ return;
+
+ hci_pend_le_conn_del(hdev, addr, addr_type);
+
+ list_del(&params->list);
+ kfree(params);
+
+ BT_DBG("addr %pMR (type %u)", addr, addr_type);
+}
+
+/* This function requires the caller holds hdev->lock */
+void hci_conn_params_clear(struct hci_dev *hdev)
+{
+ struct hci_conn_params *params, *tmp;
+
+ list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
+ list_del(&params->list);
+ kfree(params);
+ }
+
+ BT_DBG("All LE connection parameters were removed");
+}
+
+/* This function requires the caller holds hdev->lock */
+struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
+ bdaddr_t *addr, u8 addr_type)
+{
+ struct bdaddr_list *entry;
+
+ list_for_each_entry(entry, &hdev->pend_le_conns, list) {
+ if (bacmp(&entry->bdaddr, addr) == 0 &&
+ entry->bdaddr_type == addr_type)
+ return entry;
+ }
+
+ return NULL;
+}
+
+/* This function requires the caller holds hdev->lock */
+void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
+{
+ struct bdaddr_list *entry;
+
+ entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
+ if (entry)
+ goto done;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ BT_ERR("Out of memory");
+ return;
+ }
+
+ bacpy(&entry->bdaddr, addr);
+ entry->bdaddr_type = addr_type;
+
+ list_add(&entry->list, &hdev->pend_le_conns);
+
+ BT_DBG("addr %pMR (type %u)", addr, addr_type);
+
+done:
+ hci_update_background_scan(hdev);
+}
+
+/* This function requires the caller holds hdev->lock */
+void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
+{
+ struct bdaddr_list *entry;
+
+ entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
+ if (!entry)
+ goto done;
+
+ list_del(&entry->list);
+ kfree(entry);
+
+ BT_DBG("addr %pMR (type %u)", addr, addr_type);
+
+done:
+ hci_update_background_scan(hdev);
+}
+
+/* This function requires the caller holds hdev->lock */
+void hci_pend_le_conns_clear(struct hci_dev *hdev)
+{
+ struct bdaddr_list *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+
+ BT_DBG("All LE pending connections cleared");
+}
+
+static void inquiry_complete(struct hci_dev *hdev, u8 status)
+{
+ if (status) {
+ BT_ERR("Failed to start inquiry: status %d", status);
+
+ hci_dev_lock(hdev);
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ hci_dev_unlock(hdev);
+ return;
+ }
+}
+
+static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
+{
+ /* General inquiry access code (GIAC) */
+ u8 lap[3] = { 0x33, 0x8b, 0x9e };
+ struct hci_request req;
+ struct hci_cp_inquiry cp;
+ int err;
+
+ if (status) {
+ BT_ERR("Failed to disable LE scanning: status %d", status);
+ return;
+ }
+
+ switch (hdev->discovery.type) {
+ case DISCOV_TYPE_LE:
+ hci_dev_lock(hdev);
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ hci_dev_unlock(hdev);
+ break;
+
+ case DISCOV_TYPE_INTERLEAVED:
+ hci_req_init(&req, hdev);
+
+ memset(&cp, 0, sizeof(cp));
+ memcpy(&cp.lap, lap, sizeof(cp.lap));
+ cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
+ hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
+
+ hci_dev_lock(hdev);
+
+ hci_inquiry_cache_flush(hdev);
+
+ err = hci_req_run(&req, inquiry_complete);
+ if (err) {
+ BT_ERR("Inquiry request failed: err %d", err);
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ }
+
+ hci_dev_unlock(hdev);
+ break;
+ }
+}
+
+static void le_scan_disable_work(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ le_scan_disable.work);
+ struct hci_request req;
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ hci_req_init(&req, hdev);
+
+ hci_req_add_le_scan_disable(&req);
+
+ err = hci_req_run(&req, le_scan_disable_work_complete);
+ if (err)
+ BT_ERR("Disable LE scanning request failed: err %d", err);
+}
+
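+/* Illustrative sketch, not part of this change: how a caller (e.g. the
+ * mgmt discovery code) would arm the delayed work above so that LE
+ * scanning is disabled again after a timeout.
+ */
+static void __maybe_unused example_arm_le_scan_timeout(struct hci_dev *hdev,
+ unsigned long timeout)
+{
+ queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
+}
+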
+static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
+{
+ struct hci_dev *hdev = req->hdev;
+
+ /* If we're advertising or initiating an LE connection we can't
+ * go ahead and change the random address at this time. This is
+ * because the eventual initiator address used for the
+ * subsequently created connection will be undefined (some
+ * controllers use the new address and others the one we had
+ * when the operation started).
+ *
+ * In this kind of scenario skip the update and let the random
+ * address be updated at the next cycle.
+ */
+ if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
+ hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
+ BT_DBG("Deferring random address update");
+ return;
+ }
+
+ hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
+}
+
+int hci_update_random_address(struct hci_request *req, bool require_privacy,
+ u8 *own_addr_type)
+{
+ struct hci_dev *hdev = req->hdev;
+ int err;
+
+ /* If privacy is enabled use a resolvable private address. If the
+ * current RPA has expired or an address other than the current RPA
+ * is in use, then generate a new one.
+ */
+ if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
+ int to;
+
+ *own_addr_type = ADDR_LE_DEV_RANDOM;
+
+ if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
+ !bacmp(&hdev->random_addr, &hdev->rpa))
+ return 0;
+
+ err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
+ if (err < 0) {
+ BT_ERR("%s failed to generate new RPA", hdev->name);
+ return err;
+ }
+
+ set_random_addr(req, &hdev->rpa);
+
+ to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
+ queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
+
+ return 0;
+ }
+
+ /* In case of required privacy without resolvable private address,
+ * use an unresolvable private address. This is useful for active
+ * scanning and non-connectable advertising.
+ */
+ if (require_privacy) {
+ bdaddr_t urpa;
+
+ get_random_bytes(&urpa, 6);
+ urpa.b[5] &= 0x3f; /* Clear two most significant bits */
+
+ *own_addr_type = ADDR_LE_DEV_RANDOM;
+ set_random_addr(req, &urpa);
+ return 0;
+ }
+
+ /* If forcing static address is in use or there is no public
+ * address, use the static address as the random address (but skip
+ * the HCI command if the current random address is already the
+ * static one).
+ */
+ if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
+ !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
+ *own_addr_type = ADDR_LE_DEV_RANDOM;
+ if (bacmp(&hdev->static_addr, &hdev->random_addr))
+ hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
+ &hdev->static_addr);
+ return 0;
+ }
+
+ /* Neither privacy nor static address is being used so use a
+ * public address.
+ */
+ *own_addr_type = ADDR_LE_DEV_PUBLIC;
+
+ return 0;
+}
+
+/* Copy the Identity Address of the controller.
+ *
+ * If the controller has a public BD_ADDR, then by default use that one.
+ * If this is a LE only controller without a public address, default to
+ * the static random address.
+ *
+ * For debugging purposes it is possible to force controllers with a
+ * public address to use the static random address instead.
+ */
+void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 *bdaddr_type)
+{
+ if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
+ !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
+ bacpy(bdaddr, &hdev->static_addr);
+ *bdaddr_type = ADDR_LE_DEV_RANDOM;
+ } else {
+ bacpy(bdaddr, &hdev->bdaddr);
+ *bdaddr_type = ADDR_LE_DEV_PUBLIC;
+ }
+}
+
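+/* Illustrative sketch, not part of this change: reading back the
+ * identity address chosen by the rules above.
+ */
+static void __maybe_unused example_identity(struct hci_dev *hdev)
+{
+ bdaddr_t id_addr;
+ u8 id_addr_type;
+
+ hci_copy_identity_address(hdev, &id_addr, &id_addr_type);
+ BT_DBG("%s identity %pMR (type %u)", hdev->name, &id_addr,
+ id_addr_type);
+}
+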
+/* Alloc HCI device */
+struct hci_dev *hci_alloc_dev(void)
+{
+ struct hci_dev *hdev;
+
+ hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
+ if (!hdev)
+ return NULL;
- hdev->flags = 0;
hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
hdev->esco_type = (ESCO_HV1);
hdev->link_mode = (HCI_LM_ACCEPT);
+ hdev->num_iac = 0x01; /* One IAC support is mandatory */
+ hdev->io_capability = 0x03; /* No Input No Output */
+ hdev->inq_tx_power = HCI_TX_POWER_INVALID;
+ hdev->adv_tx_power = HCI_TX_POWER_INVALID;
- hdev->idle_timeout = 0;
hdev->sniff_max_interval = 800;
hdev->sniff_min_interval = 80;
- tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
- tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
- tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
+ hdev->le_adv_channel_map = 0x07;
+ hdev->le_scan_interval = 0x0060;
+ hdev->le_scan_window = 0x0030;
+ hdev->le_conn_min_interval = 0x0028;
+ hdev->le_conn_max_interval = 0x0038;
+
+ hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
+ hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
+ hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
+ hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
+
+ mutex_init(&hdev->lock);
+ mutex_init(&hdev->req_lock);
+
+ INIT_LIST_HEAD(&hdev->mgmt_pending);
+ INIT_LIST_HEAD(&hdev->blacklist);
+ INIT_LIST_HEAD(&hdev->uuids);
+ INIT_LIST_HEAD(&hdev->link_keys);
+ INIT_LIST_HEAD(&hdev->long_term_keys);
+ INIT_LIST_HEAD(&hdev->identity_resolving_keys);
+ INIT_LIST_HEAD(&hdev->remote_oob_data);
+ INIT_LIST_HEAD(&hdev->le_white_list);
+ INIT_LIST_HEAD(&hdev->le_conn_params);
+ INIT_LIST_HEAD(&hdev->pend_le_conns);
+ INIT_LIST_HEAD(&hdev->conn_hash.list);
+
+ INIT_WORK(&hdev->rx_work, hci_rx_work);
+ INIT_WORK(&hdev->cmd_work, hci_cmd_work);
+ INIT_WORK(&hdev->tx_work, hci_tx_work);
+ INIT_WORK(&hdev->power_on, hci_power_on);
+
+ INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
+ INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
+ INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
skb_queue_head_init(&hdev->rx_q);
skb_queue_head_init(&hdev->cmd_q);
skb_queue_head_init(&hdev->raw_q);
- for (i = 0; i < NUM_REASSEMBLY; i++)
- hdev->reassembly[i] = NULL;
-
init_waitqueue_head(&hdev->req_wait_q);
- mutex_init(&hdev->req_lock);
- inquiry_cache_init(hdev);
+ setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
- hci_conn_hash_init(hdev);
+ hci_init_sysfs(hdev);
+ discovery_init(hdev);
- INIT_LIST_HEAD(&hdev->blacklist);
+ return hdev;
+}
+EXPORT_SYMBOL(hci_alloc_dev);
- memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
+/* Free HCI device */
+void hci_free_dev(struct hci_dev *hdev)
+{
+ /* will free via device release */
+ put_device(&hdev->dev);
+}
+EXPORT_SYMBOL(hci_free_dev);
- atomic_set(&hdev->promisc, 0);
+/* Register HCI device */
+int hci_register_dev(struct hci_dev *hdev)
+{
+ int id, error;
- write_unlock_bh(&hci_dev_list_lock);
+ if (!hdev->open || !hdev->close)
+ return -EINVAL;
- hdev->workqueue = create_singlethread_workqueue(hdev->name);
- if (!hdev->workqueue)
- goto nomem;
+ /* Do not allow HCI_AMP devices to register at index 0,
+ * so the index can be used as the AMP controller ID.
+ */
+ switch (hdev->dev_type) {
+ case HCI_BREDR:
+ id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
+ break;
+ case HCI_AMP:
+ id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
+ break;
+ default:
+ return -EINVAL;
+ }
- hci_register_sysfs(hdev);
+ if (id < 0)
+ return id;
+
+ sprintf(hdev->name, "hci%d", id);
+ hdev->id = id;
+
+ BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
+
+ hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
+ WQ_MEM_RECLAIM, 1, hdev->name);
+ if (!hdev->workqueue) {
+ error = -ENOMEM;
+ goto err;
+ }
+
+ hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
+ WQ_MEM_RECLAIM, 1, hdev->name);
+ if (!hdev->req_workqueue) {
+ destroy_workqueue(hdev->workqueue);
+ error = -ENOMEM;
+ goto err;
+ }
+
+ if (!IS_ERR_OR_NULL(bt_debugfs))
+ hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
+
+ dev_set_name(&hdev->dev, "%s", hdev->name);
+
+ hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(hdev->tfm_aes)) {
+ BT_ERR("Unable to create crypto context");
+ error = PTR_ERR(hdev->tfm_aes);
+ hdev->tfm_aes = NULL;
+ goto err_wqueue;
+ }
+
+ error = device_add(&hdev->dev);
+ if (error < 0)
+ goto err_tfm;
hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
- RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
+ RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
+ hdev);
if (hdev->rfkill) {
if (rfkill_register(hdev->rfkill) < 0) {
rfkill_destroy(hdev->rfkill);
@@ -969,37 +3974,75 @@ int hci_register_dev(struct hci_dev *hdev)
}
}
- mgmt_index_added(hdev->id);
+ if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
+ set_bit(HCI_RFKILLED, &hdev->dev_flags);
+
+ set_bit(HCI_SETUP, &hdev->dev_flags);
+ set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
+
+ if (hdev->dev_type == HCI_BREDR) {
+ /* Assume BR/EDR support until proven otherwise (such as
+ * through reading supported features during init).
+ */
+ set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+ }
+
+ write_lock(&hci_dev_list_lock);
+ list_add(&hdev->list, &hci_dev_list);
+ write_unlock(&hci_dev_list_lock);
+
hci_notify(hdev, HCI_DEV_REG);
+ hci_dev_hold(hdev);
+
+ queue_work(hdev->req_workqueue, &hdev->power_on);
return id;
-nomem:
- write_lock_bh(&hci_dev_list_lock);
- list_del(&hdev->list);
- write_unlock_bh(&hci_dev_list_lock);
+err_tfm:
+ crypto_free_blkcipher(hdev->tfm_aes);
+err_wqueue:
+ destroy_workqueue(hdev->workqueue);
+ destroy_workqueue(hdev->req_workqueue);
+err:
+ ida_simple_remove(&hci_index_ida, hdev->id);
- return -ENOMEM;
+ return error;
}
EXPORT_SYMBOL(hci_register_dev);
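+
+/* Illustrative driver-side sketch, not part of this change: the minimal
+ * allocate/register flow a transport driver follows. The example_*
+ * callbacks are hypothetical stubs, not a real transport.
+ */
+static int example_open(struct hci_dev *hdev)
+{
+ return 0;
+}
+
+static int example_close(struct hci_dev *hdev)
+{
+ return 0;
+}
+
+static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ kfree_skb(skb);
+ return 0;
+}
+
+static int __maybe_unused example_probe(void)
+{
+ struct hci_dev *hdev;
+ int err;
+
+ hdev = hci_alloc_dev();
+ if (!hdev)
+ return -ENOMEM;
+
+ hdev->bus = HCI_USB;
+ hdev->open = example_open;
+ hdev->close = example_close;
+ hdev->send = example_send;
+
+ err = hci_register_dev(hdev);
+ if (err < 0)
+ hci_free_dev(hdev);
+
+ return err;
+}
+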
/* Unregister HCI device */
-int hci_unregister_dev(struct hci_dev *hdev)
+void hci_unregister_dev(struct hci_dev *hdev)
{
- int i;
+ int i, id;
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
- write_lock_bh(&hci_dev_list_lock);
+ set_bit(HCI_UNREGISTER, &hdev->dev_flags);
+
+ id = hdev->id;
+
+ write_lock(&hci_dev_list_lock);
list_del(&hdev->list);
- write_unlock_bh(&hci_dev_list_lock);
+ write_unlock(&hci_dev_list_lock);
hci_dev_do_close(hdev);
for (i = 0; i < NUM_REASSEMBLY; i++)
kfree_skb(hdev->reassembly[i]);
- mgmt_index_removed(hdev->id);
+ cancel_work_sync(&hdev->power_on);
+
+ if (!test_bit(HCI_INIT, &hdev->flags) &&
+ !test_bit(HCI_SETUP, &hdev->dev_flags)) {
+ hci_dev_lock(hdev);
+ mgmt_index_removed(hdev);
+ hci_dev_unlock(hdev);
+ }
+
+ /* mgmt_index_removed should take care of emptying the
+ * pending list */
+ BUG_ON(!list_empty(&hdev->mgmt_pending));
+
hci_notify(hdev, HCI_DEV_UNREG);
if (hdev->rfkill) {
@@ -1007,17 +4050,31 @@ int hci_unregister_dev(struct hci_dev *hdev)
rfkill_destroy(hdev->rfkill);
}
- hci_unregister_sysfs(hdev);
+ if (hdev->tfm_aes)
+ crypto_free_blkcipher(hdev->tfm_aes);
+
+ device_del(&hdev->dev);
+
+ debugfs_remove_recursive(hdev->debugfs);
destroy_workqueue(hdev->workqueue);
+ destroy_workqueue(hdev->req_workqueue);
- hci_dev_lock_bh(hdev);
+ hci_dev_lock(hdev);
hci_blacklist_clear(hdev);
- hci_dev_unlock_bh(hdev);
+ hci_uuids_clear(hdev);
+ hci_link_keys_clear(hdev);
+ hci_smp_ltks_clear(hdev);
+ hci_smp_irks_clear(hdev);
+ hci_remote_oob_data_clear(hdev);
+ hci_white_list_clear(hdev);
+ hci_conn_params_clear(hdev);
+ hci_pend_le_conns_clear(hdev);
+ hci_dev_unlock(hdev);
- __hci_dev_put(hdev);
+ hci_dev_put(hdev);
- return 0;
+ ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
@@ -1038,31 +4095,29 @@ int hci_resume_dev(struct hci_dev *hdev)
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
-int hci_recv_frame(struct sk_buff *skb)
+int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct hci_dev *hdev = (struct hci_dev *) skb->dev;
if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
- && !test_bit(HCI_INIT, &hdev->flags))) {
+ && !test_bit(HCI_INIT, &hdev->flags))) {
kfree_skb(skb);
return -ENXIO;
}
- /* Incomming skb */
+ /* Incoming skb */
bt_cb(skb)->incoming = 1;
/* Time stamp */
__net_timestamp(skb);
- /* Queue frame for rx task */
skb_queue_tail(&hdev->rx_q, skb);
- tasklet_schedule(&hdev->rx_task);
+ queue_work(hdev->workqueue, &hdev->rx_work);
return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
- int count, __u8 index, gfp_t gfp_mask)
+ int count, __u8 index)
{
int len = 0;
int hlen = 0;
@@ -1071,7 +4126,7 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
struct bt_skb_cb *scb;
if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
- index >= NUM_REASSEMBLY)
+ index >= NUM_REASSEMBLY)
return -EILSEQ;
skb = hdev->reassembly[index];
@@ -1092,7 +4147,7 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
break;
}
- skb = bt_skb_alloc(len, gfp_mask);
+ skb = bt_skb_alloc(len, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
@@ -1100,13 +4155,12 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
scb->expect = hlen;
scb->pkt_type = type;
- skb->dev = (void *) hdev;
hdev->reassembly[index] = skb;
}
while (count) {
scb = (void *) skb->cb;
- len = min(scb->expect, (__u16)count);
+ len = min_t(uint, scb->expect, count);
memcpy(skb_put(skb, len), data, len);
@@ -1160,7 +4214,7 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
/* Complete frame */
bt_cb(skb)->pkt_type = type;
- hci_recv_frame(skb);
+ hci_recv_frame(hdev, skb);
hdev->reassembly[index] = NULL;
return remain;
@@ -1178,14 +4232,13 @@ int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
return -EILSEQ;
while (count) {
- rem = hci_reassembly(hdev, type, data, count,
- type - 1, GFP_ATOMIC);
+ rem = hci_reassembly(hdev, type, data, count, type - 1);
if (rem < 0)
return rem;
data += (count - rem);
count = rem;
- };
+ }
return rem;
}
@@ -1213,14 +4266,14 @@ int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
} else
type = bt_cb(skb)->pkt_type;
- rem = hci_reassembly(hdev, type, data,
- count, STREAM_REASSEMBLY, GFP_ATOMIC);
+ rem = hci_reassembly(hdev, type, data, count,
+ STREAM_REASSEMBLY);
if (rem < 0)
return rem;
data += (count - rem);
count = rem;
- };
+ }
return rem;
}
@@ -1228,59 +4281,13 @@ EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */
-/* Register/Unregister protocols.
- * hci_task_lock is used to ensure that no tasks are running. */
-int hci_register_proto(struct hci_proto *hp)
-{
- int err = 0;
-
- BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
-
- if (hp->id >= HCI_MAX_PROTO)
- return -EINVAL;
-
- write_lock_bh(&hci_task_lock);
-
- if (!hci_proto[hp->id])
- hci_proto[hp->id] = hp;
- else
- err = -EEXIST;
-
- write_unlock_bh(&hci_task_lock);
-
- return err;
-}
-EXPORT_SYMBOL(hci_register_proto);
-
-int hci_unregister_proto(struct hci_proto *hp)
-{
- int err = 0;
-
- BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
-
- if (hp->id >= HCI_MAX_PROTO)
- return -EINVAL;
-
- write_lock_bh(&hci_task_lock);
-
- if (hci_proto[hp->id])
- hci_proto[hp->id] = NULL;
- else
- err = -ENOENT;
-
- write_unlock_bh(&hci_task_lock);
-
- return err;
-}
-EXPORT_SYMBOL(hci_unregister_proto);
-
int hci_register_cb(struct hci_cb *cb)
{
BT_DBG("%p name %s", cb, cb->name);
- write_lock_bh(&hci_cb_list_lock);
+ write_lock(&hci_cb_list_lock);
list_add(&cb->list, &hci_cb_list);
- write_unlock_bh(&hci_cb_list_lock);
+ write_unlock(&hci_cb_list_lock);
return 0;
}
@@ -1290,52 +4297,85 @@ int hci_unregister_cb(struct hci_cb *cb)
{
BT_DBG("%p name %s", cb, cb->name);
- write_lock_bh(&hci_cb_list_lock);
+ write_lock(&hci_cb_list_lock);
list_del(&cb->list);
- write_unlock_bh(&hci_cb_list_lock);
+ write_unlock(&hci_cb_list_lock);
return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
-static int hci_send_frame(struct sk_buff *skb)
+static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
- struct hci_dev *hdev = (struct hci_dev *) skb->dev;
+ BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
- if (!hdev) {
- kfree_skb(skb);
- return -ENODEV;
- }
+ /* Time stamp */
+ __net_timestamp(skb);
- BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
+ /* Send copy to monitor */
+ hci_send_to_monitor(hdev, skb);
if (atomic_read(&hdev->promisc)) {
- /* Time stamp */
- __net_timestamp(skb);
-
+ /* Send copy to the sockets */
hci_send_to_sock(hdev, skb);
}
/* Get rid of skb owner, prior to sending to the driver. */
skb_orphan(skb);
- return hdev->send(skb);
+ if (hdev->send(hdev, skb) < 0)
+ BT_ERR("%s sending frame failed", hdev->name);
}
-/* Send HCI command */
-int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
+void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
+{
+ skb_queue_head_init(&req->cmd_q);
+ req->hdev = hdev;
+ req->err = 0;
+}
+
+int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ BT_DBG("length %u", skb_queue_len(&req->cmd_q));
+
+ /* If an error occurred during request building, remove all HCI
+ * commands queued on the HCI request queue.
+ */
+ if (req->err) {
+ skb_queue_purge(&req->cmd_q);
+ return req->err;
+ }
+
+ /* Do not allow empty requests */
+ if (skb_queue_empty(&req->cmd_q))
+ return -ENODATA;
+
+ skb = skb_peek_tail(&req->cmd_q);
+ bt_cb(skb)->req.complete = complete;
+
+ spin_lock_irqsave(&hdev->cmd_q.lock, flags);
+ skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
+ spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
+
+ queue_work(hdev->workqueue, &hdev->cmd_work);
+
+ return 0;
+}
+
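+/* Illustrative usage sketch, not part of this change: building and
+ * running a two-command request. The complete callback runs once, after
+ * the last command in the request finishes.
+ */
+static void __maybe_unused example_request(struct hci_dev *hdev,
+ hci_req_complete_t complete)
+{
+ struct hci_cp_write_def_link_policy cp;
+ struct hci_request req;
+
+ hci_req_init(&req, hdev);
+
+ cp.policy = cpu_to_le16(0x0005); /* enable role switch and sniff */
+ hci_req_add(&req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
+
+ hci_req_add_le_scan_disable(&req);
+
+ if (hci_req_run(&req, complete) < 0)
+ BT_ERR("%s example request failed to run", hdev->name);
+}
+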
+static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
+ u32 plen, const void *param)
{
int len = HCI_COMMAND_HDR_SIZE + plen;
struct hci_command_hdr *hdr;
struct sk_buff *skb;
- BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
-
skb = bt_skb_alloc(len, GFP_ATOMIC);
- if (!skb) {
- BT_ERR("%s no memory for command", hdev->name);
- return -ENOMEM;
- }
+ if (!skb)
+ return NULL;
hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
hdr->opcode = cpu_to_le16(opcode);
@@ -1347,14 +4387,72 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
BT_DBG("skb len %d", skb->len);
bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
- skb->dev = (void *) hdev;
+
+ return skb;
+}
+
+/* Send HCI command */
+int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
+ const void *param)
+{
+ struct sk_buff *skb;
+
+ BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
+
+ skb = hci_prepare_cmd(hdev, opcode, plen, param);
+ if (!skb) {
+ BT_ERR("%s no memory for command", hdev->name);
+ return -ENOMEM;
+ }
+
+ /* Stand-alone HCI commands must be flagged as
+ * single-command requests.
+ */
+ bt_cb(skb)->req.start = true;
skb_queue_tail(&hdev->cmd_q, skb);
- tasklet_schedule(&hdev->cmd_task);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
return 0;
}
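+
+/* Illustrative sketch, not part of this change: queueing a single
+ * stand-alone command, e.g. a controller reset.
+ */
+static void __maybe_unused example_send_reset(struct hci_dev *hdev)
+{
+ if (hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL) < 0)
+ BT_ERR("%s failed to queue reset", hdev->name);
+}
+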
+/* Queue a command to an asynchronous HCI request */
+void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
+ const void *param, u8 event)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct sk_buff *skb;
+
+ BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
+
+ /* If an error occurred during request building, there is no point in
+ * queueing the HCI command. We can simply return.
+ */
+ if (req->err)
+ return;
+
+ skb = hci_prepare_cmd(hdev, opcode, plen, param);
+ if (!skb) {
+ BT_ERR("%s no memory for command (opcode 0x%4.4x)",
+ hdev->name, opcode);
+ req->err = -ENOMEM;
+ return;
+ }
+
+ if (skb_queue_empty(&req->cmd_q))
+ bt_cb(skb)->req.start = true;
+
+ bt_cb(skb)->req.event = event;
+
+ skb_queue_tail(&req->cmd_q, skb);
+}
+
+void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
+ const void *param)
+{
+ hci_req_add_ev(req, opcode, plen, param, 0);
+}
+
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
@@ -1368,7 +4466,7 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
if (hdr->opcode != cpu_to_le16(opcode))
return NULL;
- BT_DBG("%s opcode 0x%x", hdev->name, opcode);
+ BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
@@ -1386,23 +4484,36 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
hdr->dlen = cpu_to_le16(len);
}
-void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
+static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
+ struct sk_buff *skb, __u16 flags)
{
+ struct hci_conn *conn = chan->conn;
struct hci_dev *hdev = conn->hdev;
struct sk_buff *list;
- BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
+ skb->len = skb_headlen(skb);
+ skb->data_len = 0;
- skb->dev = (void *) hdev;
bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
- hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);
+
+ switch (hdev->dev_type) {
+ case HCI_BREDR:
+ hci_add_acl_hdr(skb, conn->handle, flags);
+ break;
+ case HCI_AMP:
+ hci_add_acl_hdr(skb, chan->handle, flags);
+ break;
+ default:
+ BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
+ return;
+ }
list = skb_shinfo(skb)->frag_list;
if (!list) {
/* Non fragmented */
BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
- skb_queue_tail(&conn->data_q, skb);
+ skb_queue_tail(queue, skb);
} else {
/* Fragmented */
BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
@@ -1410,27 +4521,37 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
skb_shinfo(skb)->frag_list = NULL;
/* Queue all fragments atomically */
- spin_lock_bh(&conn->data_q.lock);
+ spin_lock(&queue->lock);
+
+ __skb_queue_tail(queue, skb);
- __skb_queue_tail(&conn->data_q, skb);
+ flags &= ~ACL_START;
+ flags |= ACL_CONT;
do {
skb = list; list = list->next;
- skb->dev = (void *) hdev;
bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
- hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);
+ hci_add_acl_hdr(skb, conn->handle, flags);
BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
- __skb_queue_tail(&conn->data_q, skb);
+ __skb_queue_tail(queue, skb);
} while (list);
- spin_unlock_bh(&conn->data_q.lock);
+ spin_unlock(&queue->lock);
}
+}
+
+void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
+{
+ struct hci_dev *hdev = chan->conn->hdev;
+
+ BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
+
+ hci_queue_acl(chan, &chan->data_q, skb, flags);
- tasklet_schedule(&hdev->tx_task);
+ queue_work(hdev->workqueue, &hdev->tx_work);
}
-EXPORT_SYMBOL(hci_send_acl);
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
@@ -1447,30 +4568,28 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
skb_reset_transport_header(skb);
memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
- skb->dev = (void *) hdev;
bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
skb_queue_tail(&conn->data_q, skb);
- tasklet_schedule(&hdev->tx_task);
+ queue_work(hdev->workqueue, &hdev->tx_work);
}
-EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */
/* HCI Connection scheduler */
-static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
+static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
+ int *quote)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- struct hci_conn *conn = NULL;
- int num = 0, min = ~0;
- struct list_head *p;
+ struct hci_conn *conn = NULL, *c;
+ unsigned int num = 0, min = ~0;
/* We don't have to lock device here. Connections are always
* added and removed with TX task disabled. */
- list_for_each(p, &h->list) {
- struct hci_conn *c;
- c = list_entry(p, struct hci_conn, list);
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
if (c->type != type || skb_queue_empty(&c->data_q))
continue;
@@ -1483,11 +4602,33 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
min = c->sent;
conn = c;
}
+
+ if (hci_conn_num(hdev, type) == num)
+ break;
}
+ rcu_read_unlock();
+
if (conn) {
- int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
- int q = cnt / num;
+ int cnt, q;
+
+ switch (conn->type) {
+ case ACL_LINK:
+ cnt = hdev->acl_cnt;
+ break;
+ case SCO_LINK:
+ case ESCO_LINK:
+ cnt = hdev->sco_cnt;
+ break;
+ case LE_LINK:
+ cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
+ break;
+ default:
+ cnt = 0;
+ BT_ERR("Unknown link type");
+ }
+
+ q = cnt / num;
*quote = q ? q : 1;
} else
*quote = 0;
@@ -1496,57 +4637,293 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
return conn;
}
-static inline void hci_acl_tx_to(struct hci_dev *hdev)
+static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
struct hci_conn_hash *h = &hdev->conn_hash;
- struct list_head *p;
- struct hci_conn *c;
+ struct hci_conn *c;
- BT_ERR("%s ACL tx timeout", hdev->name);
+ BT_ERR("%s link tx timeout", hdev->name);
+
+ rcu_read_lock();
/* Kill stalled connections */
- list_for_each(p, &h->list) {
- c = list_entry(p, struct hci_conn, list);
- if (c->type == ACL_LINK && c->sent) {
- BT_ERR("%s killing stalled ACL connection %s",
- hdev->name, batostr(&c->dst));
- hci_acl_disconn(c, 0x13);
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type == type && c->sent) {
+ BT_ERR("%s killing stalled connection %pMR",
+ hdev->name, &c->dst);
+ hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
}
}
+
+ rcu_read_unlock();
}
-static inline void hci_sched_acl(struct hci_dev *hdev)
+static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
+ int *quote)
{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_chan *chan = NULL;
+ unsigned int num = 0, min = ~0, cur_prio = 0;
struct hci_conn *conn;
- struct sk_buff *skb;
- int quote;
+ int cnt, q, conn_num = 0;
+
+ BT_DBG("%s", hdev->name);
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(conn, &h->list, list) {
+ struct hci_chan *tmp;
+
+ if (conn->type != type)
+ continue;
+
+ if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
+ continue;
+
+ conn_num++;
+
+ list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
+ struct sk_buff *skb;
+
+ if (skb_queue_empty(&tmp->data_q))
+ continue;
+
+ skb = skb_peek(&tmp->data_q);
+ if (skb->priority < cur_prio)
+ continue;
+
+ if (skb->priority > cur_prio) {
+ num = 0;
+ min = ~0;
+ cur_prio = skb->priority;
+ }
+
+ num++;
+
+ if (conn->sent < min) {
+ min = conn->sent;
+ chan = tmp;
+ }
+ }
+
+ if (hci_conn_num(hdev, type) == conn_num)
+ break;
+ }
+
+ rcu_read_unlock();
+
+ if (!chan)
+ return NULL;
+
+ switch (chan->conn->type) {
+ case ACL_LINK:
+ cnt = hdev->acl_cnt;
+ break;
+ case AMP_LINK:
+ cnt = hdev->block_cnt;
+ break;
+ case SCO_LINK:
+ case ESCO_LINK:
+ cnt = hdev->sco_cnt;
+ break;
+ case LE_LINK:
+ cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
+ break;
+ default:
+ cnt = 0;
+ BT_ERR("Unknown link type");
+ }
+
+ q = cnt / num;
+ *quote = q ? q : 1;
+ BT_DBG("chan %p quote %d", chan, *quote);
+ return chan;
+}
+
+static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *conn;
+ int num = 0;
BT_DBG("%s", hdev->name);
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(conn, &h->list, list) {
+ struct hci_chan *chan;
+
+ if (conn->type != type)
+ continue;
+
+ if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
+ continue;
+
+ num++;
+
+ list_for_each_entry_rcu(chan, &conn->chan_list, list) {
+ struct sk_buff *skb;
+
+ if (chan->sent) {
+ chan->sent = 0;
+ continue;
+ }
+
+ if (skb_queue_empty(&chan->data_q))
+ continue;
+
+ skb = skb_peek(&chan->data_q);
+ if (skb->priority >= HCI_PRIO_MAX - 1)
+ continue;
+
+ skb->priority = HCI_PRIO_MAX - 1;
+
+ BT_DBG("chan %p skb %p promoted to %d", chan, skb,
+ skb->priority);
+ }
+
+ if (hci_conn_num(hdev, type) == num)
+ break;
+ }
+
+ rcu_read_unlock();
+}
+
+static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ /* Calculate count of blocks used by this packet */
+ return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
+}
+
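+/* Worked example, not part of this change: with block_len == 64, a
+ * 300 byte ACL payload (skb->len == 304 including the 4 byte ACL
+ * header) needs DIV_ROUND_UP(300, 64) == 5 controller blocks.
+ */
+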
+static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
+{
if (!test_bit(HCI_RAW, &hdev->flags)) {
/* ACL tx timeout must be longer than maximum
* link supervision timeout (40.9 seconds) */
- if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
- hci_acl_tx_to(hdev);
+ if (!cnt && time_after(jiffies, hdev->acl_last_tx +
+ HCI_ACL_TX_TIMEOUT))
+ hci_link_tx_to(hdev, ACL_LINK);
}
+}
- while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
- while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
- BT_DBG("skb %p len %d", skb, skb->len);
+static void hci_sched_acl_pkt(struct hci_dev *hdev)
+{
+ unsigned int cnt = hdev->acl_cnt;
+ struct hci_chan *chan;
+ struct sk_buff *skb;
+ int quote;
+
+ __check_timeout(hdev, cnt);
+
+ while (hdev->acl_cnt &&
+ (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
+ u32 priority = (skb_peek(&chan->data_q))->priority;
+ while (quote-- && (skb = skb_peek(&chan->data_q))) {
+ BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+ skb->len, skb->priority);
+
+ /* Stop if priority has changed */
+ if (skb->priority < priority)
+ break;
- hci_conn_enter_active_mode(conn);
+ skb = skb_dequeue(&chan->data_q);
- hci_send_frame(skb);
+ hci_conn_enter_active_mode(chan->conn,
+ bt_cb(skb)->force_active);
+
+ hci_send_frame(hdev, skb);
hdev->acl_last_tx = jiffies;
hdev->acl_cnt--;
- conn->sent++;
+ chan->sent++;
+ chan->conn->sent++;
+ }
+ }
+
+ if (cnt != hdev->acl_cnt)
+ hci_prio_recalculate(hdev, ACL_LINK);
+}
+
+static void hci_sched_acl_blk(struct hci_dev *hdev)
+{
+ unsigned int cnt = hdev->block_cnt;
+ struct hci_chan *chan;
+ struct sk_buff *skb;
+ int quote;
+ u8 type;
+
+ __check_timeout(hdev, cnt);
+
+ BT_DBG("%s", hdev->name);
+
+ if (hdev->dev_type == HCI_AMP)
+ type = AMP_LINK;
+ else
+ type = ACL_LINK;
+
+ while (hdev->block_cnt > 0 &&
+ (chan = hci_chan_sent(hdev, type, &quote))) {
+ u32 priority = (skb_peek(&chan->data_q))->priority;
+ while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
+ int blocks;
+
+ BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+ skb->len, skb->priority);
+
+ /* Stop if priority has changed */
+ if (skb->priority < priority)
+ break;
+
+ skb = skb_dequeue(&chan->data_q);
+
+ blocks = __get_blocks(hdev, skb);
+ if (blocks > hdev->block_cnt)
+ return;
+
+ hci_conn_enter_active_mode(chan->conn,
+ bt_cb(skb)->force_active);
+
+ hci_send_frame(hdev, skb);
+ hdev->acl_last_tx = jiffies;
+
+ hdev->block_cnt -= blocks;
+ quote -= blocks;
+
+ chan->sent += blocks;
+ chan->conn->sent += blocks;
}
}
+
+ if (cnt != hdev->block_cnt)
+ hci_prio_recalculate(hdev, type);
+}
+
+static void hci_sched_acl(struct hci_dev *hdev)
+{
+ BT_DBG("%s", hdev->name);
+
+ /* No ACL link over BR/EDR controller */
+ if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
+ return;
+
+ /* No AMP link over AMP controller */
+ if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
+ return;
+
+ switch (hdev->flow_ctl_mode) {
+ case HCI_FLOW_CTL_MODE_PACKET_BASED:
+ hci_sched_acl_pkt(hdev);
+ break;
+
+ case HCI_FLOW_CTL_MODE_BLOCK_BASED:
+ hci_sched_acl_blk(hdev);
+ break;
+ }
}
/* Schedule SCO */
-static inline void hci_sched_sco(struct hci_dev *hdev)
+static void hci_sched_sco(struct hci_dev *hdev)
{
struct hci_conn *conn;
struct sk_buff *skb;
@@ -1554,10 +4931,13 @@ static inline void hci_sched_sco(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
+ if (!hci_conn_num(hdev, SCO_LINK))
+ return;
+
while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
BT_DBG("skb %p len %d", skb, skb->len);
- hci_send_frame(skb);
+ hci_send_frame(hdev, skb);
conn->sent++;
if (conn->sent == ~0)
@@ -1566,7 +4946,7 @@ static inline void hci_sched_sco(struct hci_dev *hdev)
}
}
-static inline void hci_sched_esco(struct hci_dev *hdev)
+static void hci_sched_esco(struct hci_dev *hdev)
{
struct hci_conn *conn;
struct sk_buff *skb;
@@ -1574,10 +4954,14 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
- while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
+ if (!hci_conn_num(hdev, ESCO_LINK))
+ return;
+
+ while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
+ &quote))) {
while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
BT_DBG("skb %p len %d", skb, skb->len);
- hci_send_frame(skb);
+ hci_send_frame(hdev, skb);
conn->sent++;
if (conn->sent == ~0)
@@ -1586,34 +4970,82 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
}
}
-static void hci_tx_task(unsigned long arg)
+static void hci_sched_le(struct hci_dev *hdev)
{
- struct hci_dev *hdev = (struct hci_dev *) arg;
+ struct hci_chan *chan;
struct sk_buff *skb;
+ int quote, cnt, tmp;
+
+ BT_DBG("%s", hdev->name);
- read_lock(&hci_task_lock);
+ if (!hci_conn_num(hdev, LE_LINK))
+ return;
- BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
+ if (!test_bit(HCI_RAW, &hdev->flags)) {
+ /* LE tx timeout must be longer than maximum
+ * link supervision timeout (40.9 seconds) */
+ if (!hdev->le_cnt && hdev->le_pkts &&
+ time_after(jiffies, hdev->le_last_tx + HZ * 45))
+ hci_link_tx_to(hdev, LE_LINK);
+ }
- /* Schedule queues and send stuff to HCI driver */
+ cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
+ tmp = cnt;
+ while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
+ u32 priority = (skb_peek(&chan->data_q))->priority;
+ while (quote-- && (skb = skb_peek(&chan->data_q))) {
+ BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+ skb->len, skb->priority);
- hci_sched_acl(hdev);
+ /* Stop if priority has changed */
+ if (skb->priority < priority)
+ break;
- hci_sched_sco(hdev);
+ skb = skb_dequeue(&chan->data_q);
- hci_sched_esco(hdev);
+ hci_send_frame(hdev, skb);
+ hdev->le_last_tx = jiffies;
+
+ cnt--;
+ chan->sent++;
+ chan->conn->sent++;
+ }
+ }
+
+ if (hdev->le_pkts)
+ hdev->le_cnt = cnt;
+ else
+ hdev->acl_cnt = cnt;
+
+ if (cnt != tmp)
+ hci_prio_recalculate(hdev, LE_LINK);
+}
+
+static void hci_tx_work(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
+ struct sk_buff *skb;
+
+ BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
+ hdev->sco_cnt, hdev->le_cnt);
+
+ if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+ /* Schedule queues and send stuff to HCI driver */
+ hci_sched_acl(hdev);
+ hci_sched_sco(hdev);
+ hci_sched_esco(hdev);
+ hci_sched_le(hdev);
+ }
/* Send next queued raw (unknown type) packet */
while ((skb = skb_dequeue(&hdev->raw_q)))
- hci_send_frame(skb);
-
- read_unlock(&hci_task_lock);
+ hci_send_frame(hdev, skb);
}
-/* ----- HCI RX task (incoming data proccessing) ----- */
+/* ----- HCI RX task (incoming data processing) ----- */
/* ACL data packet */
-static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_acl_hdr *hdr = (void *) skb->data;
struct hci_conn *conn;
@@ -1625,7 +5057,8 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
flags = hci_flags(handle);
handle = hci_handle(handle);
- BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
+ BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
+ handle, flags);
hdev->stat.acl_rx++;
@@ -1634,26 +5067,21 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_unlock(hdev);
if (conn) {
- register struct hci_proto *hp;
-
- hci_conn_enter_active_mode(conn);
+ hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
/* Send to upper protocol */
- hp = hci_proto[HCI_PROTO_L2CAP];
- if (hp && hp->recv_acldata) {
- hp->recv_acldata(conn, skb, flags);
- return;
- }
+ l2cap_recv_acldata(conn, skb, flags);
+ return;
} else {
BT_ERR("%s ACL packet for unknown connection handle %d",
- hdev->name, handle);
+ hdev->name, handle);
}
kfree_skb(skb);
}
/* SCO data packet */
-static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_sco_hdr *hdr = (void *) skb->data;
struct hci_conn *conn;
@@ -1663,7 +5091,7 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
handle = __le16_to_cpu(hdr->handle);
- BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
+ BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
hdev->stat.sco_rx++;
@@ -1672,38 +5100,134 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_unlock(hdev);
if (conn) {
- register struct hci_proto *hp;
-
/* Send to upper protocol */
- hp = hci_proto[HCI_PROTO_SCO];
- if (hp && hp->recv_scodata) {
- hp->recv_scodata(conn, skb);
- return;
- }
+ sco_recv_scodata(conn, skb);
+ return;
} else {
BT_ERR("%s SCO packet for unknown connection handle %d",
- hdev->name, handle);
+ hdev->name, handle);
}
kfree_skb(skb);
}
-static void hci_rx_task(unsigned long arg)
+static bool hci_req_is_complete(struct hci_dev *hdev)
{
- struct hci_dev *hdev = (struct hci_dev *) arg;
struct sk_buff *skb;
- BT_DBG("%s", hdev->name);
+ skb = skb_peek(&hdev->cmd_q);
+ if (!skb)
+ return true;
+
+ return bt_cb(skb)->req.start;
+}
- read_lock(&hci_task_lock);
+static void hci_resend_last(struct hci_dev *hdev)
+{
+ struct hci_command_hdr *sent;
+ struct sk_buff *skb;
+ u16 opcode;
+
+ if (!hdev->sent_cmd)
+ return;
+
+ sent = (void *) hdev->sent_cmd->data;
+ opcode = __le16_to_cpu(sent->opcode);
+ if (opcode == HCI_OP_RESET)
+ return;
+
+ skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
+ if (!skb)
+ return;
+
+ skb_queue_head(&hdev->cmd_q, skb);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
+}
+
+void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
+{
+ hci_req_complete_t req_complete = NULL;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
+
+ /* If the completed command doesn't match the last one that was
+ * sent we need to do special handling of it.
+ */
+ if (!hci_sent_cmd_data(hdev, opcode)) {
+ /* Some CSR based controllers generate a spontaneous
+ * reset complete event during init and any pending
+ * command will never be completed. In such a case we
+ * need to resend whatever was the last sent
+ * command.
+ */
+ if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
+ hci_resend_last(hdev);
+
+ return;
+ }
+
+ /* If the command succeeded and there's still more commands in
+ * this request the request is not yet complete.
+ */
+ if (!status && !hci_req_is_complete(hdev))
+ return;
+
+ /* If this was the last command in a request the complete
+ * callback would be found in hdev->sent_cmd instead of the
+ * command queue (hdev->cmd_q).
+ */
+ if (hdev->sent_cmd) {
+ req_complete = bt_cb(hdev->sent_cmd)->req.complete;
+
+ if (req_complete) {
+ /* We must set the complete callback to NULL to
+ * avoid calling the callback more than once if
+ * this function gets called again.
+ */
+ bt_cb(hdev->sent_cmd)->req.complete = NULL;
+
+ goto call_complete;
+ }
+ }
+
+ /* Remove all pending commands belonging to this request */
+ spin_lock_irqsave(&hdev->cmd_q.lock, flags);
+ while ((skb = __skb_dequeue(&hdev->cmd_q))) {
+ if (bt_cb(skb)->req.start) {
+ __skb_queue_head(&hdev->cmd_q, skb);
+ break;
+ }
+
+ req_complete = bt_cb(skb)->req.complete;
+ kfree_skb(skb);
+ }
+ spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
+
+call_complete:
+ if (req_complete)
+ req_complete(hdev, status);
+}
+
+static void hci_rx_work(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
+ struct sk_buff *skb;
+
+ BT_DBG("%s", hdev->name);
while ((skb = skb_dequeue(&hdev->rx_q))) {
+ /* Send copy to monitor */
+ hci_send_to_monitor(hdev, skb);
+
if (atomic_read(&hdev->promisc)) {
/* Send copy to the sockets */
hci_send_to_sock(hdev, skb);
}
- if (test_bit(HCI_RAW, &hdev->flags)) {
+ if (test_bit(HCI_RAW, &hdev->flags) ||
+ test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
kfree_skb(skb);
continue;
}
@@ -1721,6 +5245,7 @@ static void hci_rx_task(unsigned long arg)
/* Process frame */
switch (bt_cb(skb)->pkt_type) {
case HCI_EVENT_PKT:
+ BT_DBG("%s Event packet", hdev->name);
hci_event_packet(hdev, skb);
break;
@@ -1739,34 +5264,137 @@ static void hci_rx_task(unsigned long arg)
break;
}
}
-
- read_unlock(&hci_task_lock);
}
-static void hci_cmd_task(unsigned long arg)
+static void hci_cmd_work(struct work_struct *work)
{
- struct hci_dev *hdev = (struct hci_dev *) arg;
+ struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
struct sk_buff *skb;
- BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
-
- if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
- BT_ERR("%s command tx timeout", hdev->name);
- atomic_set(&hdev->cmd_cnt, 1);
- }
+ BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
+ atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
/* Send queued commands */
- if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
+ if (atomic_read(&hdev->cmd_cnt)) {
+ skb = skb_dequeue(&hdev->cmd_q);
+ if (!skb)
+ return;
+
kfree_skb(hdev->sent_cmd);
- hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
+ hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
if (hdev->sent_cmd) {
atomic_dec(&hdev->cmd_cnt);
- hci_send_frame(skb);
- hdev->cmd_last_tx = jiffies;
+ hci_send_frame(hdev, skb);
+ if (test_bit(HCI_RESET, &hdev->flags))
+ del_timer(&hdev->cmd_timer);
+ else
+ mod_timer(&hdev->cmd_timer,
+ jiffies + HCI_CMD_TIMEOUT);
} else {
skb_queue_head(&hdev->cmd_q, skb);
- tasklet_schedule(&hdev->cmd_task);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
}
}
}
+
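hci_cmd_work() sends at most one command per invocation and only when the
controller has a free command credit (cmd_cnt); a clone is kept in
sent_cmd so the completion event can be matched, and the command timer is
re-armed unless a reset is in flight. The credit gating, reduced to a
userspace sketch where credits and send_one() are stand-ins, not kernel
names:

#include <stdio.h>

static int credits = 1;                 /* stands in for hdev->cmd_cnt */

static int send_one(const char *cmd)
{
        if (!credits)
                return -1;              /* busy: command stays queued */
        credits--;                      /* held until Command Complete/Status */
        printf("sent: %s\n", cmd);
        return 0;
}

int main(void)
{
        send_one("HCI_OP_RESET");
        if (send_one("HCI_OP_READ_BD_ADDR") < 0)
                printf("queued: waiting for a command credit\n");
        credits++;                      /* event handler returns the credit */
        send_one("HCI_OP_READ_BD_ADDR");
        return 0;
}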
+void hci_req_add_le_scan_disable(struct hci_request *req)
+{
+ struct hci_cp_le_set_scan_enable cp;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.enable = LE_SCAN_DISABLE;
+ hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
+}
+
+void hci_req_add_le_passive_scan(struct hci_request *req)
+{
+ struct hci_cp_le_set_scan_param param_cp;
+ struct hci_cp_le_set_scan_enable enable_cp;
+ struct hci_dev *hdev = req->hdev;
+ u8 own_addr_type;
+
+ /* Set require_privacy to true to avoid identification from
+ * unknown peer devices. Since this is passive scanning, no
+ * SCAN_REQ using the local identity should be sent. Mandating
+ * privacy is just an extra precaution.
+ */
+ if (hci_update_random_address(req, true, &own_addr_type))
+ return;
+
+ memset(&param_cp, 0, sizeof(param_cp));
+ param_cp.type = LE_SCAN_PASSIVE;
+ param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
+ param_cp.window = cpu_to_le16(hdev->le_scan_window);
+ param_cp.own_address_type = own_addr_type;
+ hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
+ &param_cp);
+
+ memset(&enable_cp, 0, sizeof(enable_cp));
+ enable_cp.enable = LE_SCAN_ENABLE;
+ enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
+ hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
+ &enable_cp);
+}
+
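The parameter block built by hci_req_add_le_passive_scan() goes on the
wire little-endian, which is what cpu_to_le16() guarantees regardless of
host byte order. A self-contained sketch of the same marshalling, with
htole16() from <endian.h> standing in for the kernel helper; the field
layout mirrors the LE Set Scan Parameters command:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct le_set_scan_param {
        uint8_t  type;                  /* 0x00 = passive scan */
        uint16_t interval;              /* units of 0.625 ms */
        uint16_t window;                /* units of 0.625 ms */
        uint8_t  own_address_type;
        uint8_t  filter_policy;
} __attribute__((packed));

int main(void)
{
        struct le_set_scan_param cp;

        memset(&cp, 0, sizeof(cp));
        cp.type = 0x00;                 /* LE_SCAN_PASSIVE */
        cp.interval = htole16(0x0060);  /* 0x60 * 0.625 ms = 60 ms */
        cp.window = htole16(0x0030);    /* 0x30 * 0.625 ms = 30 ms */

        printf("param block is %zu bytes\n", sizeof(cp));
        return 0;
}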
+static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
+{
+ if (status)
+ BT_DBG("HCI request failed to update background scanning: "
+ "status 0x%2.2x", status);
+}
+
+/* This function controls the background scanning based on the
+ * hdev->pend_le_conns list. If there are pending LE connections we start
+ * the background scanning, otherwise we stop it.
+ *
+ * This function requires that the caller holds hdev->lock.
+ */
+void hci_update_background_scan(struct hci_dev *hdev)
+{
+ struct hci_request req;
+ struct hci_conn *conn;
+ int err;
+
+ hci_req_init(&req, hdev);
+
+ if (list_empty(&hdev->pend_le_conns)) {
+ /* If there are no pending LE connections, we should stop
+ * the background scanning.
+ */
+
+ /* If controller is not scanning we are done. */
+ if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+ return;
+
+ hci_req_add_le_scan_disable(&req);
+
+ BT_DBG("%s stopping background scanning", hdev->name);
+ } else {
+ /* If there is at least one pending LE connection, we should
+ * keep the background scan running.
+ */
+
+ /* If controller is connecting, we should not start scanning
+ * since some controllers are not able to scan and connect at
+ * the same time.
+ */
+ conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+ if (conn)
+ return;
+
+ /* If controller is currently scanning, we stop it to ensure we
+ * don't miss any advertising (due to duplicates filter).
+ */
+ if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+ hci_req_add_le_scan_disable(&req);
+
+ hci_req_add_le_passive_scan(&req);
+
+ BT_DBG("%s starting background scanning", hdev->name);
+ }
+
+ err = hci_req_run(&req, update_background_scan_complete);
+ if (err)
+ BT_ERR("Failed to run HCI request: err %d", err);
+}
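hci_update_background_scan() boils down to a small decision table: no
pending connections means stop scanning (if it was running), an in-flight
connect means do nothing, and otherwise scanning is started — restarted
if already running, so the duplicates filter is cleared. The same table
as a pure function, a sketch with invented enum names:

#include <stdio.h>
#include <stdbool.h>

enum bg_action { BG_NONE, BG_STOP, BG_START, BG_RESTART };

static enum bg_action bg_scan_action(bool pending_conns, bool connecting,
                                     bool scanning)
{
        if (!pending_conns)
                return scanning ? BG_STOP : BG_NONE;
        if (connecting)
                return BG_NONE; /* some controllers can't scan and connect */
        return scanning ? BG_RESTART : BG_START;
}

int main(void)
{
        /* pending connection while already scanning -> restart (3) */
        printf("%d\n", bg_scan_action(true, false, true));
        return 0;
}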
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index a290854fdaa..640c54ec1bd 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -24,26 +24,14 @@
/* Bluetooth HCI event handling. */
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/interrupt.h>
-#include <linux/notifier.h>
-#include <net/sock.h>
-
-#include <asm/system.h>
-#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/mgmt.h>
+
+#include "a2mp.h"
+#include "amp.h"
/* Handle HCI Event packets */
@@ -51,33 +39,50 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (status)
return;
clear_bit(HCI_INQUIRY, &hdev->flags);
+ smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
+ wake_up_bit(&hdev->flags, HCI_INQUIRY);
- hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
+ hci_dev_lock(hdev);
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ hci_dev_unlock(hdev);
hci_conn_check_pending(hdev);
}
+static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (status)
+ return;
+
+ set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
+}
+
static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (status)
return;
- clear_bit(HCI_INQUIRY, &hdev->flags);
+ clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
hci_conn_check_pending(hdev);
}
-static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
BT_DBG("%s", hdev->name);
}
@@ -87,7 +92,7 @@ static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
struct hci_rp_role_discovery *rp = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -110,7 +115,7 @@ static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
struct hci_rp_read_link_policy *rp = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -130,7 +135,7 @@ static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
struct hci_conn *conn;
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -148,11 +153,12 @@ static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_unlock(hdev);
}
-static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -160,12 +166,13 @@ static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *sk
hdev->link_policy = __le16_to_cpu(rp->policy);
}
-static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
if (!sent)
@@ -173,17 +180,32 @@ static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *s
if (!status)
hdev->link_policy = get_unaligned_le16(sent);
-
- hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
}
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ clear_bit(HCI_RESET, &hdev->flags);
+
+ /* Reset all non-persistent flags */
+ hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
- hci_req_complete(hdev, HCI_OP_RESET, status);
+ hdev->discovery.state = DISCOVERY_STOPPED;
+ hdev->inq_tx_power = HCI_TX_POWER_INVALID;
+ hdev->adv_tx_power = HCI_TX_POWER_INVALID;
+
+ memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
+ hdev->adv_data_len = 0;
+
+ memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
+ hdev->scan_rsp_data_len = 0;
+
+ hdev->le_scan_type = LE_SCAN_PASSIVE;
+
+ hdev->ssp_debug_mode = 0;
}
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -191,28 +213,33 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
__u8 status = *((__u8 *) skb->data);
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
-
- if (status)
- return;
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
if (!sent)
return;
- memcpy(hdev->dev_name, sent, 248);
+ hci_dev_lock(hdev);
+
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_set_local_name_complete(hdev, sent, status);
+ else if (!status)
+ memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
+
+ hci_dev_unlock(hdev);
}
static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_local_name *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
- memcpy(hdev->dev_name, rp->name, 248);
+ if (test_bit(HCI_SETUP, &hdev->dev_flags))
+ memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
@@ -220,7 +247,7 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
__u8 status = *((__u8 *) skb->data);
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
if (!sent)
@@ -235,7 +262,8 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
clear_bit(HCI_AUTH, &hdev->flags);
}
- hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_auth_enable_complete(hdev, status);
}
static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
@@ -243,7 +271,7 @@ static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
__u8 status = *((__u8 *) skb->data);
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
if (!sent)
@@ -257,42 +285,61 @@ static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
else
clear_bit(HCI_ENCRYPT, &hdev->flags);
}
-
- hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
}
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
+ __u8 param, status = *((__u8 *) skb->data);
+ int old_pscan, old_iscan;
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
if (!sent)
return;
- if (!status) {
- __u8 param = *((__u8 *) sent);
-
- clear_bit(HCI_PSCAN, &hdev->flags);
- clear_bit(HCI_ISCAN, &hdev->flags);
+ param = *((__u8 *) sent);
- if (param & SCAN_INQUIRY)
- set_bit(HCI_ISCAN, &hdev->flags);
+ hci_dev_lock(hdev);
- if (param & SCAN_PAGE)
- set_bit(HCI_PSCAN, &hdev->flags);
+ if (status) {
+ mgmt_write_scan_failed(hdev, param, status);
+ hdev->discov_timeout = 0;
+ goto done;
}
- hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
+ /* We need to ensure that we set this back on if someone changed
+ * the scan mode through a raw HCI socket.
+ */
+ set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+
+ old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
+ old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
+
+ if (param & SCAN_INQUIRY) {
+ set_bit(HCI_ISCAN, &hdev->flags);
+ if (!old_iscan)
+ mgmt_discoverable(hdev, 1);
+ } else if (old_iscan)
+ mgmt_discoverable(hdev, 0);
+
+ if (param & SCAN_PAGE) {
+ set_bit(HCI_PSCAN, &hdev->flags);
+ if (!old_pscan)
+ mgmt_connectable(hdev, 1);
+ } else if (old_pscan)
+ mgmt_connectable(hdev, 0);
+
+done:
+ hci_dev_unlock(hdev);
}
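The scan-enable parameter decoded above is a plain bitmask: bit 0 enables
inquiry scan (discoverable), bit 1 enables page scan (connectable), per
the core specification. A trivial decode, using the same SCAN_* values as
the kernel:

#include <stdio.h>

#define SCAN_INQUIRY 0x01               /* discoverable */
#define SCAN_PAGE    0x02               /* connectable */

int main(void)
{
        unsigned char param = SCAN_INQUIRY | SCAN_PAGE;

        printf("discoverable: %d, connectable: %d\n",
               !!(param & SCAN_INQUIRY), !!(param & SCAN_PAGE));
        return 0;
}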
static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -300,7 +347,7 @@ static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
memcpy(hdev->dev_class, rp->dev_class, 3);
BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
- hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
+ hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
@@ -308,16 +355,21 @@ static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
__u8 status = *((__u8 *) skb->data);
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
-
- if (status)
- return;
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
if (!sent)
return;
- memcpy(hdev->dev_class, sent, 3);
+ hci_dev_lock(hdev);
+
+ if (status == 0)
+ memcpy(hdev->dev_class, sent, 3);
+
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_set_class_of_dev_complete(hdev, sent, status);
+
+ hci_dev_unlock(hdev);
}
static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
@@ -325,7 +377,7 @@ static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
struct hci_rp_read_voice_setting *rp = (void *) skb->data;
__u16 setting;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -337,22 +389,20 @@ static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
hdev->voice_setting = setting;
- BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
+ BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
- if (hdev->notify) {
- tasklet_disable(&hdev->tx_task);
+ if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
- tasklet_enable(&hdev->tx_task);
- }
}
-static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_write_voice_setting(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
__u16 setting;
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (status)
return;
@@ -368,88 +418,121 @@ static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb
hdev->voice_setting = setting;
- BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
+ BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
- if (hdev->notify) {
- tasklet_disable(&hdev->tx_task);
+ if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
- tasklet_enable(&hdev->tx_task);
- }
}
-static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
-
- BT_DBG("%s status 0x%x", hdev->name, status);
+ struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
- hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
-}
-
-static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
-{
- struct hci_rp_read_ssp_mode *rp = (void *) skb->data;
-
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
- hdev->ssp_mode = rp->mode;
+ hdev->num_iac = rp->num_iac;
+
+ BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
}
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- void *sent;
+ struct hci_cp_write_ssp_mode *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
- if (status)
+ sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
+ if (!sent)
return;
- sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
+ if (!status) {
+ if (sent->mode)
+ hdev->features[1][0] |= LMP_HOST_SSP;
+ else
+ hdev->features[1][0] &= ~LMP_HOST_SSP;
+ }
+
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_ssp_enable_complete(hdev, sent->mode, status);
+ else if (!status) {
+ if (sent->mode)
+ set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
+ else
+ clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
+ }
+}
+
+static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ u8 status = *((u8 *) skb->data);
+ struct hci_cp_write_sc_support *sent;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
if (!sent)
return;
- hdev->ssp_mode = *((__u8 *) sent);
+ if (!status) {
+ if (sent->support)
+ hdev->features[1][0] |= LMP_HOST_SC;
+ else
+ hdev->features[1][0] &= ~LMP_HOST_SC;
+ }
+
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_sc_enable_complete(hdev, sent->support, status);
+ else if (!status) {
+ if (sent->support)
+ set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
+ else
+ clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
+ }
}
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_local_version *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
- hdev->hci_ver = rp->hci_ver;
- hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
- hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
-
- BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
- hdev->manufacturer,
- hdev->hci_ver, hdev->hci_rev);
+ if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
+ hdev->hci_ver = rp->hci_ver;
+ hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
+ hdev->lmp_ver = rp->lmp_ver;
+ hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
+ hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
+ }
}
-static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_read_local_commands(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_rp_read_local_commands *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
- memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
+ if (test_bit(HCI_SETUP, &hdev->dev_flags))
+ memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
}
-static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_read_local_features(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_rp_read_local_features *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -459,52 +542,74 @@ static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb
/* Adjust default settings according to features
* supported by device. */
- if (hdev->features[0] & LMP_3SLOT)
+ if (hdev->features[0][0] & LMP_3SLOT)
hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
- if (hdev->features[0] & LMP_5SLOT)
+ if (hdev->features[0][0] & LMP_5SLOT)
hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
- if (hdev->features[1] & LMP_HV2) {
+ if (hdev->features[0][1] & LMP_HV2) {
hdev->pkt_type |= (HCI_HV2);
hdev->esco_type |= (ESCO_HV2);
}
- if (hdev->features[1] & LMP_HV3) {
+ if (hdev->features[0][1] & LMP_HV3) {
hdev->pkt_type |= (HCI_HV3);
hdev->esco_type |= (ESCO_HV3);
}
- if (hdev->features[3] & LMP_ESCO)
+ if (lmp_esco_capable(hdev))
hdev->esco_type |= (ESCO_EV3);
- if (hdev->features[4] & LMP_EV4)
+ if (hdev->features[0][4] & LMP_EV4)
hdev->esco_type |= (ESCO_EV4);
- if (hdev->features[4] & LMP_EV5)
+ if (hdev->features[0][4] & LMP_EV5)
hdev->esco_type |= (ESCO_EV5);
- if (hdev->features[5] & LMP_EDR_ESCO_2M)
+ if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
hdev->esco_type |= (ESCO_2EV3);
- if (hdev->features[5] & LMP_EDR_ESCO_3M)
+ if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
hdev->esco_type |= (ESCO_3EV3);
- if (hdev->features[5] & LMP_EDR_3S_ESCO)
+ if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
+}
+
+static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ if (rp->status)
+ return;
- BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
- hdev->features[0], hdev->features[1],
- hdev->features[2], hdev->features[3],
- hdev->features[4], hdev->features[5],
- hdev->features[6], hdev->features[7]);
+ if (hdev->max_page < rp->max_page)
+ hdev->max_page = rp->max_page;
+
+ if (rp->page < HCI_MAX_PAGES)
+ memcpy(hdev->features[rp->page], rp->features, 8);
+}
+
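The features array is now two-dimensional — features[page][byte] — with
page 0 holding the classic LMP feature mask and higher pages filled in
from Read Local Extended Features replies, as above. A sketch of that
layout; the bit values here are invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define MAX_PAGES 3                     /* mirrors HCI_MAX_PAGES */

static uint8_t features[MAX_PAGES][8];

static int feature_set(int page, int byte, uint8_t mask)
{
        return (features[page][byte] & mask) != 0;
}

int main(void)
{
        features[0][0] |= 0x01;         /* a page-0 (classic LMP) bit */
        features[1][0] |= 0x02;         /* a host feature bit on page 1 */

        printf("page0: %d page1: %d\n",
               feature_set(0, 0, 0x01), feature_set(1, 0, 0x02));
        return 0;
}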
+static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ if (!rp->status)
+ hdev->flow_ctl_mode = rp->mode;
}
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_buffer_size *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -522,50 +627,699 @@ static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
hdev->acl_cnt = hdev->acl_pkts;
hdev->sco_cnt = hdev->sco_pkts;
- BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
- hdev->acl_mtu, hdev->acl_pkts,
- hdev->sco_mtu, hdev->sco_pkts);
+ BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
+ hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_bd_addr *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (!rp->status)
bacpy(&hdev->bdaddr, &rp->bdaddr);
+}
+
+static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
+ hdev->page_scan_interval = __le16_to_cpu(rp->interval);
+ hdev->page_scan_window = __le16_to_cpu(rp->window);
+ }
+}
+
+static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ u8 status = *((u8 *) skb->data);
+ struct hci_cp_write_page_scan_activity *sent;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (status)
+ return;
+
+ sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
+ if (!sent)
+ return;
- hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
+ hdev->page_scan_interval = __le16_to_cpu(sent->interval);
+ hdev->page_scan_window = __le16_to_cpu(sent->window);
}
-static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
+ hdev->page_scan_type = rp->type;
+}
+
+static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ u8 status = *((u8 *) skb->data);
+ u8 *type;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (status)
+ return;
+
+ type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
+ if (type)
+ hdev->page_scan_type = *type;
+}
+
+static void hci_cc_read_data_block_size(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_read_data_block_size *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ if (rp->status)
+ return;
+
+ hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
+ hdev->block_len = __le16_to_cpu(rp->block_len);
+ hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
+
+ hdev->block_cnt = hdev->num_blocks;
+
+ BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
+ hdev->block_cnt, hdev->block_len);
+}
+
+static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ if (rp->status)
+ goto a2mp_rsp;
+
+ hdev->amp_status = rp->amp_status;
+ hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
+ hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
+ hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
+ hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
+ hdev->amp_type = rp->amp_type;
+ hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
+ hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
+ hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
+ hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
+
+a2mp_rsp:
+ a2mp_send_getinfo_rsp(hdev);
+}
+
+static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
+ struct amp_assoc *assoc = &hdev->loc_assoc;
+ size_t rem_len, frag_len;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ if (rp->status)
+ goto a2mp_rsp;
+
+ frag_len = skb->len - sizeof(*rp);
+ rem_len = __le16_to_cpu(rp->rem_len);
+
+ if (rem_len > frag_len) {
+ BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
+
+ memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
+ assoc->offset += frag_len;
+
+ /* Read other fragments */
+ amp_read_loc_assoc_frag(hdev, rp->phy_handle);
+
+ return;
+ }
+
+ memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
+ assoc->len = assoc->offset + rem_len;
+ assoc->offset = 0;
+
+a2mp_rsp:
+ /* Send A2MP Rsp when all fragments are received */
+ a2mp_send_getampassoc_rsp(hdev, rp->status);
+ a2mp_send_create_phy_link_req(hdev, rp->status);
+}
+
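hci_cc_read_local_amp_assoc() reassembles the AMP assoc data from
fragments: each partial reply is appended at a running offset and another
read is issued, until the remaining length fits in the final fragment.
The same pattern in miniature, with invented names throughout:

#include <stdio.h>
#include <string.h>

static char assoc[64];
static size_t offset;

/* Returns 1 once the final fragment has been absorbed. */
static int add_frag(const char *frag, size_t frag_len, size_t rem_len)
{
        if (rem_len > frag_len) {       /* more fragments to come */
                memcpy(assoc + offset, frag, frag_len);
                offset += frag_len;
                return 0;
        }
        memcpy(assoc + offset, frag, rem_len);  /* final fragment */
        offset = 0;
        return 1;
}

int main(void)
{
        add_frag("hello ", 6, 11);      /* 11 bytes still outstanding */
        if (add_frag("world", 5, 5))
                printf("assembled: %.11s\n", assoc);
        return 0;
}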
+static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ if (!rp->status)
+ hdev->inq_tx_power = rp->tx_power;
+}
+
+static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_rp_pin_code_reply *rp = (void *) skb->data;
+ struct hci_cp_pin_code_reply *cp;
+ struct hci_conn *conn;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ hci_dev_lock(hdev);
+
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
+
+ if (rp->status)
+ goto unlock;
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
+ if (!cp)
+ goto unlock;
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
+ if (conn)
+ conn->pin_length = cp->pin_len;
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ hci_dev_lock(hdev);
+
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
+ rp->status);
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ if (rp->status)
+ return;
+
+ hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
+ hdev->le_pkts = rp->le_max_pkt;
+
+ hdev->le_cnt = hdev->le_pkts;
+
+ BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
+}
+
+static void hci_cc_le_read_local_features(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_le_read_local_features *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ if (!rp->status)
+ memcpy(hdev->le_features, rp->features, 8);
+}
+
+static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ if (!rp->status)
+ hdev->adv_tx_power = rp->tx_power;
+}
+
+static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ hci_dev_lock(hdev);
+
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
+ rp->status);
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ hci_dev_lock(hdev);
+
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
+ ACL_LINK, 0, rp->status);
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ hci_dev_lock(hdev);
+
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
+ 0, rp->status);
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ hci_dev_lock(hdev);
+
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
+ ACL_LINK, 0, rp->status);
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ hci_dev_lock(hdev);
+ mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
+ NULL, NULL, rp->status);
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ hci_dev_lock(hdev);
+ mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
+ rp->hash256, rp->randomizer256,
+ rp->status);
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
+ bdaddr_t *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
- hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
+ sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
+ if (!sent)
+ return;
+
+ hci_dev_lock(hdev);
+
+ if (!status)
+ bacpy(&hdev->random_addr, sent);
+
+ hci_dev_unlock(hdev);
}
-static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
+static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
- BT_DBG("%s status 0x%x", hdev->name, status);
+ __u8 *sent, status = *((__u8 *) skb->data);
- if (status) {
- hci_req_complete(hdev, HCI_OP_INQUIRY, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
+ if (!sent)
+ return;
+
+ if (status)
+ return;
+
+ hci_dev_lock(hdev);
+
+ /* If we're doing connection initation as peripheral. Set a
+ * timeout in case something goes wrong.
+ */
+ if (*sent) {
+ struct hci_conn *conn;
+
+ conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+ if (conn)
+ queue_delayed_work(hdev->workqueue,
+ &conn->le_conn_timeout,
+ HCI_LE_CONN_TIMEOUT);
+ }
+
+ mgmt_advertising(hdev, *sent);
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_cp_le_set_scan_param *cp;
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
+ if (!cp)
+ return;
+
+ hci_dev_lock(hdev);
+
+ if (!status)
+ hdev->le_scan_type = cp->type;
+
+ hci_dev_unlock(hdev);
+}
+
+static bool has_pending_adv_report(struct hci_dev *hdev)
+{
+ struct discovery_state *d = &hdev->discovery;
+
+ return bacmp(&d->last_adv_addr, BDADDR_ANY);
+}
+
+static void clear_pending_adv_report(struct hci_dev *hdev)
+{
+ struct discovery_state *d = &hdev->discovery;
+
+ bacpy(&d->last_adv_addr, BDADDR_ANY);
+ d->last_adv_data_len = 0;
+}
+
+static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
+{
+ struct discovery_state *d = &hdev->discovery;
+
+ bacpy(&d->last_adv_addr, bdaddr);
+ d->last_adv_addr_type = bdaddr_type;
+ d->last_adv_rssi = rssi;
+ memcpy(d->last_adv_data, data, len);
+ d->last_adv_data_len = len;
+}
+
+static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_cp_le_set_scan_enable *cp;
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
+ if (!cp)
+ return;
+
+ if (status)
+ return;
+
+ switch (cp->enable) {
+ case LE_SCAN_ENABLE:
+ set_bit(HCI_LE_SCAN, &hdev->dev_flags);
+ if (hdev->le_scan_type == LE_SCAN_ACTIVE)
+ clear_pending_adv_report(hdev);
+ break;
+
+ case LE_SCAN_DISABLE:
+ /* We do this here instead of when setting DISCOVERY_STOPPED
+ * since the latter would potentially require waiting for
+ * inquiry to stop too.
+ */
+ if (has_pending_adv_report(hdev)) {
+ struct discovery_state *d = &hdev->discovery;
+
+ mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
+ d->last_adv_addr_type, NULL,
+ d->last_adv_rssi, 0, 1,
+ d->last_adv_data,
+ d->last_adv_data_len, NULL, 0);
+ }
+
+ /* Cancel this timer so that we don't try to disable scanning
+ * when it's already disabled.
+ */
+ cancel_delayed_work(&hdev->le_scan_disable);
+
+ clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
+ /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
+ * interrupted scanning due to a connect request. Therefore,
+ * mark discovery as stopped.
+ */
+ if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
+ &hdev->dev_flags))
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ break;
+
+ default:
+ BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
+ break;
+ }
+}
+
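During an active scan with duplicate filtering on, the last advertising
report is buffered (store_pending_adv_report() above) so it can be merged
with its scan response; disabling the scan flushes whatever is still
pending. The buffer-then-flush shape, reduced to a sketch where
report_found() stands in for mgmt_device_found():

#include <stdbool.h>
#include <stdio.h>

static char last_adv[32];
static bool pending;

static void store_pending(const char *data)
{
        snprintf(last_adv, sizeof(last_adv), "%s", data);
        pending = true;
}

static void report_found(const char *data)
{
        printf("device found: %s\n", data);
}

static void scan_disable(void)
{
        if (pending) {                  /* flush what the filter held back */
                report_found(last_adv);
                pending = false;
        }
}

int main(void)
{
        store_pending("adv from AA:BB:CC:DD:EE:FF");
        scan_disable();
        return 0;
}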
+static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
+
+ if (!rp->status)
+ hdev->le_white_list_size = rp->size;
+}
+
+static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (!status)
+ hci_white_list_clear(hdev);
+}
+
+static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_cp_le_add_to_white_list *sent;
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
+ if (!sent)
+ return;
+
+ if (!status)
+ hci_white_list_add(hdev, &sent->bdaddr, sent->bdaddr_type);
+}
+
+static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_cp_le_del_from_white_list *sent;
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
+ if (!sent)
+ return;
+
+ if (!status)
+ hci_white_list_del(hdev, &sent->bdaddr, sent->bdaddr_type);
+}
+
+static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ if (!rp->status)
+ memcpy(hdev->le_states, rp->le_states, 8);
+}
+
+static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_cp_write_le_host_supported *sent;
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
+ if (!sent)
+ return;
+
+ if (!status) {
+ if (sent->le) {
+ hdev->features[1][0] |= LMP_HOST_LE;
+ set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
+ } else {
+ hdev->features[1][0] &= ~LMP_HOST_LE;
+ clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
+ clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+ }
+
+ if (sent->simul)
+ hdev->features[1][0] |= LMP_HOST_LE_BREDR;
+ else
+ hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
+ }
+}
+
+static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_cp_le_set_adv_param *cp;
+ u8 status = *((u8 *) skb->data);
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (status)
+ return;
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
+ if (!cp)
+ return;
+
+ hci_dev_lock(hdev);
+ hdev->adv_addr_type = cp->own_address_type;
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
+
+ BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
+ hdev->name, rp->status, rp->phy_handle);
+
+ if (rp->status)
+ return;
+
+ amp_write_rem_assoc_continue(hdev, rp->phy_handle);
+}
+
+static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_rp_read_rssi *rp = (void *) skb->data;
+ struct hci_conn *conn;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ if (rp->status)
+ return;
+
+ hci_dev_lock(hdev);
+ conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
+ if (conn)
+ conn->rssi = rp->rssi;
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_cp_read_tx_power *sent;
+ struct hci_rp_read_tx_power *rp = (void *) skb->data;
+ struct hci_conn *conn;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ if (rp->status)
+ return;
+
+ sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
+ if (!sent)
+ return;
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
+ if (!conn)
+ goto unlock;
+
+ switch (sent->type) {
+ case 0x00:
+ conn->tx_power = rp->tx_power;
+ break;
+ case 0x01:
+ conn->max_tx_power = rp->tx_power;
+ break;
+ }
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
+{
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (status) {
hci_conn_check_pending(hdev);
- } else
- set_bit(HCI_INQUIRY, &hdev->flags);
+ return;
+ }
+
+ set_bit(HCI_INQUIRY, &hdev->flags);
}
-static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
+static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
struct hci_cp_create_conn *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
if (!cp)
@@ -575,7 +1329,7 @@ static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
- BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
+ BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
if (status) {
if (conn && conn->state == BT_CONNECT) {
@@ -590,7 +1344,7 @@ static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
if (!conn) {
conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
if (conn) {
- conn->out = 1;
+ conn->out = true;
conn->link_mode |= HCI_LM_MASTER;
} else
BT_ERR("No memory for new connection");
@@ -606,7 +1360,7 @@ static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
struct hci_conn *acl, *sco;
__u16 handle;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -617,16 +1371,19 @@ static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
handle = __le16_to_cpu(cp->handle);
- BT_DBG("%s handle %d", hdev->name, handle);
+ BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
hci_dev_lock(hdev);
acl = hci_conn_hash_lookup_handle(hdev, handle);
- if (acl && (sco = acl->link)) {
- sco->state = BT_CLOSED;
+ if (acl) {
+ sco = acl->link;
+ if (sco) {
+ sco->state = BT_CLOSED;
- hci_proto_connect_cfm(sco, status);
- hci_conn_del(sco);
+ hci_proto_connect_cfm(sco, status);
+ hci_conn_del(sco);
+ }
}
hci_dev_unlock(hdev);
@@ -637,7 +1394,7 @@ static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
struct hci_cp_auth_requested *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -652,7 +1409,7 @@ static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
if (conn) {
if (conn->state == BT_CONFIG) {
hci_proto_connect_cfm(conn, status);
- hci_conn_put(conn);
+ hci_conn_drop(conn);
}
}
@@ -664,7 +1421,7 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
struct hci_cp_set_conn_encrypt *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -679,7 +1436,7 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
if (conn) {
if (conn->state == BT_CONFIG) {
hci_proto_connect_cfm(conn, status);
- hci_conn_put(conn);
+ hci_conn_drop(conn);
}
}
@@ -687,7 +1444,7 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
}
static int hci_outgoing_auth_needed(struct hci_dev *hdev,
- struct hci_conn *conn)
+ struct hci_conn *conn)
{
if (conn->state != BT_CONFIG || !conn->out)
return 0;
@@ -696,20 +1453,102 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev,
return 0;
/* Only request authentication for SSP connections or non-SSP
- * devices with sec_level HIGH */
- if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
- conn->pending_sec_level != BT_SECURITY_HIGH)
+ * devices with sec_level MEDIUM or HIGH or if MITM protection
+ * is requested.
+ */
+ if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
+ conn->pending_sec_level != BT_SECURITY_FIPS &&
+ conn->pending_sec_level != BT_SECURITY_HIGH &&
+ conn->pending_sec_level != BT_SECURITY_MEDIUM)
return 0;
return 1;
}
+static int hci_resolve_name(struct hci_dev *hdev,
+ struct inquiry_entry *e)
+{
+ struct hci_cp_remote_name_req cp;
+
+ memset(&cp, 0, sizeof(cp));
+
+ bacpy(&cp.bdaddr, &e->data.bdaddr);
+ cp.pscan_rep_mode = e->data.pscan_rep_mode;
+ cp.pscan_mode = e->data.pscan_mode;
+ cp.clock_offset = e->data.clock_offset;
+
+ return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
+}
+
+static bool hci_resolve_next_name(struct hci_dev *hdev)
+{
+ struct discovery_state *discov = &hdev->discovery;
+ struct inquiry_entry *e;
+
+ if (list_empty(&discov->resolve))
+ return false;
+
+ e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
+ if (!e)
+ return false;
+
+ if (hci_resolve_name(hdev, e) == 0) {
+ e->name_state = NAME_PENDING;
+ return true;
+ }
+
+ return false;
+}
+
+static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
+ bdaddr_t *bdaddr, u8 *name, u8 name_len)
+{
+ struct discovery_state *discov = &hdev->discovery;
+ struct inquiry_entry *e;
+
+ if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
+ name_len, conn->dev_class);
+
+ if (discov->state == DISCOVERY_STOPPED)
+ return;
+
+ if (discov->state == DISCOVERY_STOPPING)
+ goto discov_complete;
+
+ if (discov->state != DISCOVERY_RESOLVING)
+ return;
+
+ e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
+ /* If the device was not found in the list of found devices whose
+ * names are pending, there is no need to continue resolving the next
+ * name, as that will be done upon receiving another Remote Name
+ * Request Complete event. */
+ if (!e)
+ return;
+
+ list_del(&e->list);
+ if (name) {
+ e->name_state = NAME_KNOWN;
+ mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
+ e->data.rssi, name, name_len);
+ } else {
+ e->name_state = NAME_NOT_KNOWN;
+ }
+
+ if (hci_resolve_next_name(hdev))
+ return;
+
+discov_complete:
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+}
+
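Name resolution walks entries through NAME_NEEDED -> NAME_PENDING ->
NAME_KNOWN/NAME_NOT_KNOWN, kicking off the next lookup after each reply
and marking discovery stopped once the resolve list drains. The state
transition on a reply, sketched with enum names that mirror the kernel's:

#include <stdio.h>

enum name_state { NAME_NEEDED, NAME_PENDING, NAME_KNOWN, NAME_NOT_KNOWN };

static enum name_state on_name_reply(enum name_state s, int have_name)
{
        if (s != NAME_PENDING)
                return s;               /* stale or unexpected reply */
        return have_name ? NAME_KNOWN : NAME_NOT_KNOWN;
}

int main(void)
{
        enum name_state s = on_name_reply(NAME_PENDING, 1);

        printf("state %d (2 == NAME_KNOWN)\n", s);
        return 0;
}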
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
struct hci_cp_remote_name_req *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
/* If successful wait for the name req complete event before
* checking for the need to do authentication */
@@ -723,12 +1562,25 @@ static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
- if (conn && hci_outgoing_auth_needed(hdev, conn)) {
- struct hci_cp_auth_requested cp;
- cp.handle = __cpu_to_le16(conn->handle);
- hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
+
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
+
+ if (!conn)
+ goto unlock;
+
+ if (!hci_outgoing_auth_needed(hdev, conn))
+ goto unlock;
+
+ if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
+ struct hci_cp_auth_requested auth_cp;
+
+ auth_cp.handle = __cpu_to_le16(conn->handle);
+ hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
+ sizeof(auth_cp), &auth_cp);
}
+unlock:
hci_dev_unlock(hdev);
}
@@ -737,7 +1589,7 @@ static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
struct hci_cp_read_remote_features *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -752,7 +1604,7 @@ static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
if (conn) {
if (conn->state == BT_CONFIG) {
hci_proto_connect_cfm(conn, status);
- hci_conn_put(conn);
+ hci_conn_drop(conn);
}
}
@@ -764,7 +1616,7 @@ static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
struct hci_cp_read_remote_ext_features *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -779,7 +1631,7 @@ static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
if (conn) {
if (conn->state == BT_CONFIG) {
hci_proto_connect_cfm(conn, status);
- hci_conn_put(conn);
+ hci_conn_drop(conn);
}
}
@@ -792,7 +1644,7 @@ static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
struct hci_conn *acl, *sco;
__u16 handle;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -803,16 +1655,19 @@ static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
handle = __le16_to_cpu(cp->handle);
- BT_DBG("%s handle %d", hdev->name, handle);
+ BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
hci_dev_lock(hdev);
acl = hci_conn_hash_lookup_handle(hdev, handle);
- if (acl && (sco = acl->link)) {
- sco->state = BT_CLOSED;
+ if (acl) {
+ sco = acl->link;
+ if (sco) {
+ sco->state = BT_CLOSED;
- hci_proto_connect_cfm(sco, status);
- hci_conn_del(sco);
+ hci_proto_connect_cfm(sco, status);
+ hci_conn_del(sco);
+ }
}
hci_dev_unlock(hdev);
@@ -823,7 +1678,7 @@ static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
struct hci_cp_sniff_mode *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -836,9 +1691,9 @@ static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
if (conn) {
- clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
+ clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
- if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
+ if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
hci_sco_setup(conn, status);
}
@@ -850,7 +1705,7 @@ static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
struct hci_cp_exit_sniff_mode *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -863,29 +1718,201 @@ static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
if (conn) {
- clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
+ clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
- if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
+ if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
hci_sco_setup(conn, status);
}
hci_dev_unlock(hdev);
}
-static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
- __u8 status = *((__u8 *) skb->data);
+ struct hci_cp_disconnect *cp;
+ struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, status);
+ if (!status)
+ return;
- clear_bit(HCI_INQUIRY, &hdev->flags);
+ cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
+ if (!cp)
+ return;
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
+ if (conn)
+ mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
+ conn->dst_type, status);
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
+{
+ struct hci_cp_create_phy_link *cp;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
+ if (!cp)
+ return;
- hci_req_complete(hdev, HCI_OP_INQUIRY, status);
+ hci_dev_lock(hdev);
+
+ if (status) {
+ struct hci_conn *hcon;
+
+ hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
+ if (hcon)
+ hci_conn_del(hcon);
+ } else {
+ amp_write_remote_assoc(hdev, cp->phy_handle);
+ }
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
+{
+ struct hci_cp_accept_phy_link *cp;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (status)
+ return;
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
+ if (!cp)
+ return;
+
+ amp_write_remote_assoc(hdev, cp->phy_handle);
+}
+
+static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
+{
+ struct hci_cp_le_create_conn *cp;
+ struct hci_conn *conn;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ /* All connection failure handling is taken care of by the
+ * hci_le_conn_failed function which is triggered by the HCI
+ * request completion callbacks used for connecting.
+ */
+ if (status)
+ return;
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
+ if (!cp)
+ return;
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
+ if (!conn)
+ goto unlock;
+
+ /* Store the initiator and responder address information which
+ * is needed for SMP. These values will not change during the
+ * lifetime of the connection.
+ */
+ conn->init_addr_type = cp->own_address_type;
+ if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
+ bacpy(&conn->init_addr, &hdev->random_addr);
+ else
+ bacpy(&conn->init_addr, &hdev->bdaddr);
+
+ conn->resp_addr_type = cp->peer_addr_type;
+ bacpy(&conn->resp_addr, &cp->peer_addr);
+
+ /* We don't want the connection attempt to stick around
+ * indefinitely since LE doesn't have a page timeout concept
+ * like BR/EDR. Set a timer for any connection that doesn't use
+ * the white list for connecting.
+ */
+ if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
+ queue_delayed_work(conn->hdev->workqueue,
+ &conn->le_conn_timeout,
+ HCI_LE_CONN_TIMEOUT);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
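The snapshot of initiator and responder addresses above exists because
SMP's pairing confirm calculation feeds in both device addresses exactly
as used on air, and they must not change over the connection's lifetime.
The selection logic, sketched with a simplified six-byte address type:

#include <stdio.h>
#include <stdint.h>

typedef struct { uint8_t b[6]; } bdaddr_t;

#define ADDR_LE_DEV_RANDOM 0x01

struct conn {
        uint8_t init_addr_type;
        bdaddr_t init_addr;
};

static void snapshot_init_addr(struct conn *c, uint8_t own_type,
                               const bdaddr_t *random_addr,
                               const bdaddr_t *public_addr)
{
        c->init_addr_type = own_type;
        c->init_addr = (own_type == ADDR_LE_DEV_RANDOM) ? *random_addr
                                                        : *public_addr;
}

int main(void)
{
        bdaddr_t pub = { {1, 2, 3, 4, 5, 6} };
        bdaddr_t rnd = { {9, 9, 9, 9, 9, 9} };
        struct conn c;

        snapshot_init_addr(&c, ADDR_LE_DEV_RANDOM, &rnd, &pub);
        printf("init addr[0] = %u\n", c.init_addr.b[0]);
        return 0;
}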
+static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
+{
+ struct hci_cp_le_start_enc *cp;
+ struct hci_conn *conn;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (!status)
+ return;
+
+ hci_dev_lock(hdev);
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
+ if (!cp)
+ goto unlock;
+
+ conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
+ if (!conn)
+ goto unlock;
+
+ if (conn->state != BT_CONNECTED)
+ goto unlock;
+
+ hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
+ hci_conn_drop(conn);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ __u8 status = *((__u8 *) skb->data);
+ struct discovery_state *discov = &hdev->discovery;
+ struct inquiry_entry *e;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
hci_conn_check_pending(hdev);
+
+ if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
+ return;
+
+ smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
+ wake_up_bit(&hdev->flags, HCI_INQUIRY);
+
+ if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ return;
+
+ hci_dev_lock(hdev);
+
+ if (discov->state != DISCOVERY_FINDING)
+ goto unlock;
+
+ if (list_empty(&discov->resolve)) {
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ goto unlock;
+ }
+
+ e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
+ if (e && hci_resolve_name(hdev, e) == 0) {
+ e->name_state = NAME_PENDING;
+ hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
+ } else {
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ }
+
+unlock:
+ hci_dev_unlock(hdev);
}
-static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct inquiry_data data;
struct inquiry_info *info = (void *) (skb->data + 1);
@@ -896,9 +1923,14 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
if (!num_rsp)
return;
+ if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
+ return;
+
hci_dev_lock(hdev);
- for (; num_rsp; num_rsp--) {
+ for (; num_rsp; num_rsp--, info++) {
+ bool name_known, ssp;
+
bacpy(&data.bdaddr, &info->bdaddr);
data.pscan_rep_mode = info->pscan_rep_mode;
data.pscan_period_mode = info->pscan_period_mode;
@@ -907,14 +1939,17 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
data.clock_offset = info->clock_offset;
data.rssi = 0x00;
data.ssp_mode = 0x00;
- info++;
- hci_inquiry_cache_update(hdev, &data);
+
+ name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
+ mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
+ info->dev_class, 0, !name_known, ssp, NULL,
+ 0, NULL, 0);
}
hci_dev_unlock(hdev);
}
-static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_conn_complete *ev = (void *) skb->data;
struct hci_conn *conn;
@@ -941,11 +1976,15 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
if (conn->type == ACL_LINK) {
conn->state = BT_CONFIG;
hci_conn_hold(conn);
- conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+
+ if (!conn->out && !hci_conn_ssp_enabled(conn) &&
+ !hci_find_link_key(hdev, &ev->bdaddr))
+ conn->disc_timeout = HCI_PAIRING_TIMEOUT;
+ else
+ conn->disc_timeout = HCI_DISCONN_TIMEOUT;
} else
conn->state = BT_CONNECTED;
- hci_conn_hold_device(conn);
hci_conn_add_sysfs(conn);
if (test_bit(HCI_AUTH, &hdev->flags))
@@ -959,19 +1998,23 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
struct hci_cp_read_remote_features cp;
cp.handle = ev->handle;
hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
- sizeof(cp), &cp);
+ sizeof(cp), &cp);
}
/* Set packet type for incoming connection */
- if (!conn->out && hdev->hci_ver < 3) {
+ if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
struct hci_cp_change_conn_ptype cp;
cp.handle = ev->handle;
cp.pkt_type = cpu_to_le16(conn->pkt_type);
- hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
- sizeof(cp), &cp);
+ hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
+ &cp);
}
- } else
+ } else {
conn->state = BT_CLOSED;
+ if (conn->type == ACL_LINK)
+ mgmt_connect_failed(hdev, &conn->dst, conn->type,
+ conn->dst_type, ev->status);
+ }
if (conn->type == ACL_LINK)
hci_sco_setup(conn, ev->status);
@@ -988,17 +2031,20 @@ unlock:
hci_conn_check_pending(hdev);
}
-static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_conn_request *ev = (void *) skb->data;
int mask = hdev->link_mode;
+ __u8 flags = 0;
- BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
- batostr(&ev->bdaddr), ev->link_type);
+ BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
+ ev->link_type);
- mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
+ mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
+ &flags);
- if ((mask & HCI_LM_ACCEPT) && !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
+ if ((mask & HCI_LM_ACCEPT) &&
+ !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) {
/* Connection accepted */
struct inquiry_entry *ie;
struct hci_conn *conn;
@@ -1009,7 +2055,8 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
if (ie)
memcpy(ie->data.dev_class, ev->dev_class, 3);
- conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
+ conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
+ &ev->bdaddr);
if (!conn) {
conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
if (!conn) {
@@ -1020,12 +2067,13 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
}
memcpy(conn->dev_class, ev->dev_class, 3);
- conn->state = BT_CONNECT;
hci_dev_unlock(hdev);
- if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
+ if (ev->link_type == ACL_LINK ||
+ (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
struct hci_cp_accept_conn_req cp;
+ conn->state = BT_CONNECT;
bacpy(&cp.bdaddr, &ev->bdaddr);
@@ -1034,10 +2082,11 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
else
cp.role = 0x01; /* Remain slave */
- hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
- sizeof(cp), &cp);
- } else {
+ hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
+ &cp);
+ } else if (!(flags & HCI_PROTO_DEFER)) {
struct hci_cp_accept_sync_conn_req cp;
+ conn->state = BT_CONNECT;
bacpy(&cp.bdaddr, &ev->bdaddr);
cp.pkt_type = cpu_to_le16(conn->pkt_type);
@@ -1049,99 +2098,175 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
cp.retrans_effort = 0xff;
hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
- sizeof(cp), &cp);
+ sizeof(cp), &cp);
+ } else {
+ conn->state = BT_CONNECT2;
+ hci_proto_connect_cfm(conn, 0);
}
} else {
/* Connection rejected */
struct hci_cp_reject_conn_req cp;
bacpy(&cp.bdaddr, &ev->bdaddr);
- cp.reason = 0x0f;
+ cp.reason = HCI_ERROR_REJ_BAD_ADDR;
hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
}
}
-static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_to_mgmt_reason(u8 err)
+{
+ switch (err) {
+ case HCI_ERROR_CONNECTION_TIMEOUT:
+ return MGMT_DEV_DISCONN_TIMEOUT;
+ case HCI_ERROR_REMOTE_USER_TERM:
+ case HCI_ERROR_REMOTE_LOW_RESOURCES:
+ case HCI_ERROR_REMOTE_POWER_OFF:
+ return MGMT_DEV_DISCONN_REMOTE;
+ case HCI_ERROR_LOCAL_HOST_TERM:
+ return MGMT_DEV_DISCONN_LOCAL_HOST;
+ default:
+ return MGMT_DEV_DISCONN_UNKNOWN;
+ }
+}
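/*
 * An illustrative user-space harness for the reason translation above:
 * the same switch compiled stand-alone with local copies of the
 * constants (values mirror the kernel headers, but treat them as
 * assumptions, not the kernel symbols).
 */
#include <assert.h>
#include <stdint.h>

#define HCI_ERR_CONN_TIMEOUT		0x08
#define HCI_ERR_REMOTE_USER_TERM	0x13
#define HCI_ERR_REMOTE_LOW_RESOURCES	0x14
#define HCI_ERR_REMOTE_POWER_OFF	0x15
#define HCI_ERR_LOCAL_HOST_TERM		0x16

#define MGMT_DISCONN_UNKNOWN		0x00
#define MGMT_DISCONN_TIMEOUT		0x01
#define MGMT_DISCONN_LOCAL_HOST		0x02
#define MGMT_DISCONN_REMOTE		0x03

static uint8_t to_mgmt_reason(uint8_t err)
{
	switch (err) {
	case HCI_ERR_CONN_TIMEOUT:
		return MGMT_DISCONN_TIMEOUT;
	case HCI_ERR_REMOTE_USER_TERM:
	case HCI_ERR_REMOTE_LOW_RESOURCES:
	case HCI_ERR_REMOTE_POWER_OFF:
		return MGMT_DISCONN_REMOTE;
	case HCI_ERR_LOCAL_HOST_TERM:
		return MGMT_DISCONN_LOCAL_HOST;
	default:
		return MGMT_DISCONN_UNKNOWN;
	}
}

int main(void)
{
	assert(to_mgmt_reason(HCI_ERR_REMOTE_POWER_OFF) ==
	       MGMT_DISCONN_REMOTE);
	/* Any unlisted controller error collapses to "unknown". */
	assert(to_mgmt_reason(0x3e) == MGMT_DISCONN_UNKNOWN);
	return 0;
}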
+
+static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_disconn_complete *ev = (void *) skb->data;
+ u8 reason = hci_to_mgmt_reason(ev->reason);
+ struct hci_conn_params *params;
struct hci_conn *conn;
+ bool mgmt_connected;
+ u8 type;
- BT_DBG("%s status %d", hdev->name, ev->status);
-
- if (ev->status)
- return;
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
- if (conn) {
- conn->state = BT_CLOSED;
+ if (!conn)
+ goto unlock;
- hci_proto_disconn_cfm(conn, ev->reason);
- hci_conn_del(conn);
+ if (ev->status) {
+ mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
+ conn->dst_type, ev->status);
+ goto unlock;
}
+ conn->state = BT_CLOSED;
+
+ mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
+ mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
+ reason, mgmt_connected);
+
+ if (conn->type == ACL_LINK && conn->flush_key)
+ hci_remove_link_key(hdev, &conn->dst);
+
+ params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
+ if (params) {
+ switch (params->auto_connect) {
+ case HCI_AUTO_CONN_LINK_LOSS:
+ if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
+ break;
+ /* Fall through */
+
+ case HCI_AUTO_CONN_ALWAYS:
+ hci_pend_le_conn_add(hdev, &conn->dst, conn->dst_type);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ type = conn->type;
+
+ hci_proto_disconn_cfm(conn, ev->reason);
+ hci_conn_del(conn);
+
+ /* Re-enable advertising if necessary, since it might
+ * have been disabled by the connection. From the
+ * HCI_LE_Set_Advertise_Enable command description in
+ * the core specification (v4.0):
+ * "The Controller shall continue advertising until the Host
+ * issues an LE_Set_Advertise_Enable command with
+ * Advertising_Enable set to 0x00 (Advertising is disabled)
+ * or until a connection is created or until the Advertising
+ * is timed out due to Directed Advertising."
+ */
+ if (type == LE_LINK)
+ mgmt_reenable_advertising(hdev);
+
+unlock:
hci_dev_unlock(hdev);
}
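/*
 * A sketch of the reconnect policy applied above, pulled out as a pure
 * predicate: HCI_AUTO_CONN_ALWAYS re-arms the pending LE connection on
 * every disconnect, HCI_AUTO_CONN_LINK_LOSS only when the link timed
 * out. The enum names and the error value are local stand-ins for the
 * kernel's.
 */
#include <stdbool.h>
#include <stdint.h>

enum auto_conn { AUTO_CONN_DISABLED, AUTO_CONN_LINK_LOSS, AUTO_CONN_ALWAYS };

static bool should_rearm_le_conn(enum auto_conn policy, uint8_t hci_reason)
{
	const uint8_t conn_timeout = 0x08;	/* HCI connection timeout */

	switch (policy) {
	case AUTO_CONN_LINK_LOSS:
		return hci_reason == conn_timeout;
	case AUTO_CONN_ALWAYS:
		return true;
	default:
		return false;
	}
}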
-static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_auth_complete *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
- if (conn) {
- if (!ev->status) {
+ if (!conn)
+ goto unlock;
+
+ if (!ev->status) {
+ if (!hci_conn_ssp_enabled(conn) &&
+ test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
+ BT_INFO("re-auth of legacy device is not possible.");
+ } else {
conn->link_mode |= HCI_LM_AUTH;
conn->sec_level = conn->pending_sec_level;
- } else
- conn->sec_level = BT_SECURITY_LOW;
+ }
+ } else {
+ mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
+ ev->status);
+ }
- clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
+ clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
+ clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
- if (conn->state == BT_CONFIG) {
- if (!ev->status && hdev->ssp_mode > 0 &&
- conn->ssp_mode > 0) {
- struct hci_cp_set_conn_encrypt cp;
- cp.handle = ev->handle;
- cp.encrypt = 0x01;
- hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT,
- sizeof(cp), &cp);
- } else {
- conn->state = BT_CONNECTED;
- hci_proto_connect_cfm(conn, ev->status);
- hci_conn_put(conn);
- }
+ if (conn->state == BT_CONFIG) {
+ if (!ev->status && hci_conn_ssp_enabled(conn)) {
+ struct hci_cp_set_conn_encrypt cp;
+ cp.handle = ev->handle;
+ cp.encrypt = 0x01;
+ hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
+ &cp);
} else {
- hci_auth_cfm(conn, ev->status);
-
- hci_conn_hold(conn);
- conn->disc_timeout = HCI_DISCONN_TIMEOUT;
- hci_conn_put(conn);
+ conn->state = BT_CONNECTED;
+ hci_proto_connect_cfm(conn, ev->status);
+ hci_conn_drop(conn);
}
+ } else {
+ hci_auth_cfm(conn, ev->status);
- if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
- if (!ev->status) {
- struct hci_cp_set_conn_encrypt cp;
- cp.handle = ev->handle;
- cp.encrypt = 0x01;
- hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT,
- sizeof(cp), &cp);
- } else {
- clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
- hci_encrypt_cfm(conn, ev->status, 0x00);
- }
+ hci_conn_hold(conn);
+ conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+ hci_conn_drop(conn);
+ }
+
+ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
+ if (!ev->status) {
+ struct hci_cp_set_conn_encrypt cp;
+ cp.handle = ev->handle;
+ cp.encrypt = 0x01;
+ hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
+ &cp);
+ } else {
+ clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
+ hci_encrypt_cfm(conn, ev->status, 0x00);
}
}
+unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_remote_name *ev = (void *) skb->data;
struct hci_conn *conn;
@@ -1153,56 +2278,106 @@ static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
- if (conn && hci_outgoing_auth_needed(hdev, conn)) {
+
+ if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ goto check_auth;
+
+ if (ev->status == 0)
+ hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
+ strnlen(ev->name, HCI_MAX_NAME_LENGTH));
+ else
+ hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
+
+check_auth:
+ if (!conn)
+ goto unlock;
+
+ if (!hci_outgoing_auth_needed(hdev, conn))
+ goto unlock;
+
+ if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
struct hci_cp_auth_requested cp;
cp.handle = __cpu_to_le16(conn->handle);
hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
}
+unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_encrypt_change *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
- if (conn) {
- if (!ev->status) {
- if (ev->encrypt) {
- /* Encryption implies authentication */
- conn->link_mode |= HCI_LM_AUTH;
- conn->link_mode |= HCI_LM_ENCRYPT;
- } else
- conn->link_mode &= ~HCI_LM_ENCRYPT;
- }
+ if (!conn)
+ goto unlock;
- clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
+ if (!ev->status) {
+ if (ev->encrypt) {
+ /* Encryption implies authentication */
+ conn->link_mode |= HCI_LM_AUTH;
+ conn->link_mode |= HCI_LM_ENCRYPT;
+ conn->sec_level = conn->pending_sec_level;
- if (conn->state == BT_CONFIG) {
- if (!ev->status)
- conn->state = BT_CONNECTED;
+ /* P-256 authentication key implies FIPS */
+ if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
+ conn->link_mode |= HCI_LM_FIPS;
- hci_proto_connect_cfm(conn, ev->status);
- hci_conn_put(conn);
- } else
- hci_encrypt_cfm(conn, ev->status, ev->encrypt);
+ if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
+ conn->type == LE_LINK)
+ set_bit(HCI_CONN_AES_CCM, &conn->flags);
+ } else {
+ conn->link_mode &= ~HCI_LM_ENCRYPT;
+ clear_bit(HCI_CONN_AES_CCM, &conn->flags);
+ }
}
+ clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
+
+ if (ev->status && conn->state == BT_CONNECTED) {
+ hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
+ hci_conn_drop(conn);
+ goto unlock;
+ }
+
+ if (conn->state == BT_CONFIG) {
+ if (!ev->status)
+ conn->state = BT_CONNECTED;
+
+ /* In Secure Connections Only mode, do not allow any
+ * connections that are not encrypted with AES-CCM
+ * using a P-256 authenticated combination key.
+ */
+ if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
+ (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
+ conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
+ hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
+ hci_conn_drop(conn);
+ goto unlock;
+ }
+
+ hci_proto_connect_cfm(conn, ev->status);
+ hci_conn_drop(conn);
+ } else
+ hci_encrypt_cfm(conn, ev->status, ev->encrypt);
+
+unlock:
hci_dev_unlock(hdev);
}
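/*
 * A stand-alone sketch of the Secure Connections Only gate applied
 * above: ev->encrypt is 0x00 (off), 0x01 (E0 on BR/EDR, AES-CCM on LE)
 * or 0x02 (AES-CCM on BR/EDR), and in SC-only mode a BR/EDR link must
 * be AES-CCM encrypted with a P-256 authenticated combination key.
 * The constant mirrors HCI_LK_AUTH_COMBINATION_P256 (assumed value).
 */
#include <stdbool.h>
#include <stdint.h>

#define LK_AUTH_COMBINATION_P256 0x08

static bool aes_ccm_in_use(bool is_le_link, uint8_t encrypt)
{
	return is_le_link ? encrypt != 0x00 : encrypt == 0x02;
}

static bool sc_only_allows(bool aes_ccm, uint8_t key_type)
{
	return aes_ccm && key_type == LK_AUTH_COMBINATION_P256;
}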
-static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -1211,7 +2386,7 @@ static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct
if (!ev->status)
conn->link_mode |= HCI_LM_SECURE;
- clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
+ clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
hci_key_change_cfm(conn, ev->status);
}
@@ -1219,12 +2394,13 @@ static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct
hci_dev_unlock(hdev);
}
-static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_remote_features_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_remote_features *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -1233,7 +2409,7 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff
goto unlock;
if (!ev->status)
- memcpy(conn->features, ev->features, 8);
+ memcpy(conn->features[0], ev->features, 8);
if (conn->state != BT_CONFIG)
goto unlock;
@@ -1243,41 +2419,35 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff
cp.handle = ev->handle;
cp.page = 0x01;
hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
- sizeof(cp), &cp);
+ sizeof(cp), &cp);
goto unlock;
}
- if (!ev->status) {
+ if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
struct hci_cp_remote_name_req cp;
memset(&cp, 0, sizeof(cp));
bacpy(&cp.bdaddr, &conn->dst);
cp.pscan_rep_mode = 0x02;
hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
- }
+ } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ mgmt_device_connected(hdev, &conn->dst, conn->type,
+ conn->dst_type, 0, NULL, 0,
+ conn->dev_class);
if (!hci_outgoing_auth_needed(hdev, conn)) {
conn->state = BT_CONNECTED;
hci_proto_connect_cfm(conn, ev->status);
- hci_conn_put(conn);
+ hci_conn_drop(conn);
}
unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
-{
- BT_DBG("%s", hdev->name);
-}
-
-static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
-{
- BT_DBG("%s", hdev->name);
-}
-
-static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_cmd_complete *ev = (void *) skb->data;
+ u8 status = skb->data[sizeof(*ev)];
__u16 opcode;
skb_pull(skb, sizeof(*ev));
@@ -1289,6 +2459,10 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
hci_cc_inquiry_cancel(hdev, skb);
break;
+ case HCI_OP_PERIODIC_INQ:
+ hci_cc_periodic_inq(hdev, skb);
+ break;
+
case HCI_OP_EXIT_PERIODIC_INQ:
hci_cc_exit_periodic_inq(hdev, skb);
break;
@@ -1357,18 +2531,18 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
hci_cc_write_voice_setting(hdev, skb);
break;
- case HCI_OP_HOST_BUFFER_SIZE:
- hci_cc_host_buffer_size(hdev, skb);
- break;
-
- case HCI_OP_READ_SSP_MODE:
- hci_cc_read_ssp_mode(hdev, skb);
+ case HCI_OP_READ_NUM_SUPPORTED_IAC:
+ hci_cc_read_num_supported_iac(hdev, skb);
break;
case HCI_OP_WRITE_SSP_MODE:
hci_cc_write_ssp_mode(hdev, skb);
break;
+ case HCI_OP_WRITE_SC_SUPPORT:
+ hci_cc_write_sc_support(hdev, skb);
+ break;
+
case HCI_OP_READ_LOCAL_VERSION:
hci_cc_read_local_version(hdev, skb);
break;
@@ -1381,6 +2555,10 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
hci_cc_read_local_features(hdev, skb);
break;
+ case HCI_OP_READ_LOCAL_EXT_FEATURES:
+ hci_cc_read_local_ext_features(hdev, skb);
+ break;
+
case HCI_OP_READ_BUFFER_SIZE:
hci_cc_read_buffer_size(hdev, skb);
break;
@@ -1389,23 +2567,160 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
hci_cc_read_bd_addr(hdev, skb);
break;
- case HCI_OP_WRITE_CA_TIMEOUT:
- hci_cc_write_ca_timeout(hdev, skb);
+ case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
+ hci_cc_read_page_scan_activity(hdev, skb);
+ break;
+
+ case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
+ hci_cc_write_page_scan_activity(hdev, skb);
+ break;
+
+ case HCI_OP_READ_PAGE_SCAN_TYPE:
+ hci_cc_read_page_scan_type(hdev, skb);
+ break;
+
+ case HCI_OP_WRITE_PAGE_SCAN_TYPE:
+ hci_cc_write_page_scan_type(hdev, skb);
+ break;
+
+ case HCI_OP_READ_DATA_BLOCK_SIZE:
+ hci_cc_read_data_block_size(hdev, skb);
+ break;
+
+ case HCI_OP_READ_FLOW_CONTROL_MODE:
+ hci_cc_read_flow_control_mode(hdev, skb);
+ break;
+
+ case HCI_OP_READ_LOCAL_AMP_INFO:
+ hci_cc_read_local_amp_info(hdev, skb);
+ break;
+
+ case HCI_OP_READ_LOCAL_AMP_ASSOC:
+ hci_cc_read_local_amp_assoc(hdev, skb);
+ break;
+
+ case HCI_OP_READ_INQ_RSP_TX_POWER:
+ hci_cc_read_inq_rsp_tx_power(hdev, skb);
+ break;
+
+ case HCI_OP_PIN_CODE_REPLY:
+ hci_cc_pin_code_reply(hdev, skb);
+ break;
+
+ case HCI_OP_PIN_CODE_NEG_REPLY:
+ hci_cc_pin_code_neg_reply(hdev, skb);
+ break;
+
+ case HCI_OP_READ_LOCAL_OOB_DATA:
+ hci_cc_read_local_oob_data(hdev, skb);
+ break;
+
+ case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
+ hci_cc_read_local_oob_ext_data(hdev, skb);
+ break;
+
+ case HCI_OP_LE_READ_BUFFER_SIZE:
+ hci_cc_le_read_buffer_size(hdev, skb);
+ break;
+
+ case HCI_OP_LE_READ_LOCAL_FEATURES:
+ hci_cc_le_read_local_features(hdev, skb);
+ break;
+
+ case HCI_OP_LE_READ_ADV_TX_POWER:
+ hci_cc_le_read_adv_tx_power(hdev, skb);
+ break;
+
+ case HCI_OP_USER_CONFIRM_REPLY:
+ hci_cc_user_confirm_reply(hdev, skb);
+ break;
+
+ case HCI_OP_USER_CONFIRM_NEG_REPLY:
+ hci_cc_user_confirm_neg_reply(hdev, skb);
+ break;
+
+ case HCI_OP_USER_PASSKEY_REPLY:
+ hci_cc_user_passkey_reply(hdev, skb);
+ break;
+
+ case HCI_OP_USER_PASSKEY_NEG_REPLY:
+ hci_cc_user_passkey_neg_reply(hdev, skb);
+ break;
+
+ case HCI_OP_LE_SET_RANDOM_ADDR:
+ hci_cc_le_set_random_addr(hdev, skb);
+ break;
+
+ case HCI_OP_LE_SET_ADV_ENABLE:
+ hci_cc_le_set_adv_enable(hdev, skb);
+ break;
+
+ case HCI_OP_LE_SET_SCAN_PARAM:
+ hci_cc_le_set_scan_param(hdev, skb);
+ break;
+
+ case HCI_OP_LE_SET_SCAN_ENABLE:
+ hci_cc_le_set_scan_enable(hdev, skb);
+ break;
+
+ case HCI_OP_LE_READ_WHITE_LIST_SIZE:
+ hci_cc_le_read_white_list_size(hdev, skb);
+ break;
+
+ case HCI_OP_LE_CLEAR_WHITE_LIST:
+ hci_cc_le_clear_white_list(hdev, skb);
+ break;
+
+ case HCI_OP_LE_ADD_TO_WHITE_LIST:
+ hci_cc_le_add_to_white_list(hdev, skb);
+ break;
+
+ case HCI_OP_LE_DEL_FROM_WHITE_LIST:
+ hci_cc_le_del_from_white_list(hdev, skb);
+ break;
+
+ case HCI_OP_LE_READ_SUPPORTED_STATES:
+ hci_cc_le_read_supported_states(hdev, skb);
+ break;
+
+ case HCI_OP_WRITE_LE_HOST_SUPPORTED:
+ hci_cc_write_le_host_supported(hdev, skb);
+ break;
+
+ case HCI_OP_LE_SET_ADV_PARAM:
+ hci_cc_set_adv_param(hdev, skb);
+ break;
+
+ case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
+ hci_cc_write_remote_amp_assoc(hdev, skb);
+ break;
+
+ case HCI_OP_READ_RSSI:
+ hci_cc_read_rssi(hdev, skb);
+ break;
+
+ case HCI_OP_READ_TX_POWER:
+ hci_cc_read_tx_power(hdev, skb);
break;
default:
- BT_DBG("%s opcode 0x%x", hdev->name, opcode);
+ BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
break;
}
- if (ev->ncmd) {
+ if (opcode != HCI_OP_NOP)
+ del_timer(&hdev->cmd_timer);
+
+ hci_req_cmd_complete(hdev, opcode, status);
+
+ if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
atomic_set(&hdev->cmd_cnt, 1);
if (!skb_queue_empty(&hdev->cmd_q))
- tasklet_schedule(&hdev->cmd_task);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
}
}
-static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_cmd_status *ev = (void *) skb->data;
__u16 opcode;
@@ -1459,24 +2774,51 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_cs_exit_sniff_mode(hdev, ev->status);
break;
+ case HCI_OP_DISCONNECT:
+ hci_cs_disconnect(hdev, ev->status);
+ break;
+
+ case HCI_OP_CREATE_PHY_LINK:
+ hci_cs_create_phylink(hdev, ev->status);
+ break;
+
+ case HCI_OP_ACCEPT_PHY_LINK:
+ hci_cs_accept_phylink(hdev, ev->status);
+ break;
+
+ case HCI_OP_LE_CREATE_CONN:
+ hci_cs_le_create_conn(hdev, ev->status);
+ break;
+
+ case HCI_OP_LE_START_ENC:
+ hci_cs_le_start_enc(hdev, ev->status);
+ break;
+
default:
- BT_DBG("%s opcode 0x%x", hdev->name, opcode);
+ BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
break;
}
- if (ev->ncmd) {
+ if (opcode != HCI_OP_NOP)
+ del_timer(&hdev->cmd_timer);
+
+ if (ev->status ||
+ (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
+ hci_req_cmd_complete(hdev, opcode, ev->status);
+
+ if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
atomic_set(&hdev->cmd_cnt, 1);
if (!skb_queue_empty(&hdev->cmd_q))
- tasklet_schedule(&hdev->cmd_task);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
}
}
-static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_role_change *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -1489,7 +2831,7 @@ static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb
conn->link_mode |= HCI_LM_MASTER;
}
- clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend);
+ clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
hci_role_switch_cfm(conn, ev->status, ev->role);
}
@@ -1497,80 +2839,172 @@ static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb
hci_dev_unlock(hdev);
}
-static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
- __le16 *ptr;
int i;
- skb_pull(skb, sizeof(*ev));
-
- BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
+ if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
+ BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
+ return;
+ }
- if (skb->len < ev->num_hndl * 4) {
+ if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
+ ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
BT_DBG("%s bad parameters", hdev->name);
return;
}
- tasklet_disable(&hdev->tx_task);
+ BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
- for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) {
+ for (i = 0; i < ev->num_hndl; i++) {
+ struct hci_comp_pkts_info *info = &ev->handles[i];
struct hci_conn *conn;
__u16 handle, count;
- handle = get_unaligned_le16(ptr++);
- count = get_unaligned_le16(ptr++);
+ handle = __le16_to_cpu(info->handle);
+ count = __le16_to_cpu(info->count);
conn = hci_conn_hash_lookup_handle(hdev, handle);
- if (conn) {
- conn->sent -= count;
-
- if (conn->type == ACL_LINK) {
+ if (!conn)
+ continue;
+
+ conn->sent -= count;
+
+ switch (conn->type) {
+ case ACL_LINK:
+ hdev->acl_cnt += count;
+ if (hdev->acl_cnt > hdev->acl_pkts)
+ hdev->acl_cnt = hdev->acl_pkts;
+ break;
+
+ case LE_LINK:
+ if (hdev->le_pkts) {
+ hdev->le_cnt += count;
+ if (hdev->le_cnt > hdev->le_pkts)
+ hdev->le_cnt = hdev->le_pkts;
+ } else {
hdev->acl_cnt += count;
if (hdev->acl_cnt > hdev->acl_pkts)
hdev->acl_cnt = hdev->acl_pkts;
- } else {
- hdev->sco_cnt += count;
- if (hdev->sco_cnt > hdev->sco_pkts)
- hdev->sco_cnt = hdev->sco_pkts;
}
+ break;
+
+ case SCO_LINK:
+ hdev->sco_cnt += count;
+ if (hdev->sco_cnt > hdev->sco_pkts)
+ hdev->sco_cnt = hdev->sco_pkts;
+ break;
+
+ default:
+ BT_ERR("Unknown type %d conn %p", conn->type, conn);
+ break;
}
}
- tasklet_schedule(&hdev->tx_task);
+ queue_work(hdev->workqueue, &hdev->tx_work);
+}
+
+static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
+ __u16 handle)
+{
+ struct hci_chan *chan;
+
+ switch (hdev->dev_type) {
+ case HCI_BREDR:
+ return hci_conn_hash_lookup_handle(hdev, handle);
+ case HCI_AMP:
+ chan = hci_chan_lookup_handle(hdev, handle);
+ if (chan)
+ return chan->conn;
+ break;
+ default:
+ BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
+ break;
+ }
- tasklet_enable(&hdev->tx_task);
+ return NULL;
}
-static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
+ int i;
+
+ if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
+ BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
+ return;
+ }
+
+ if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
+ ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
+ BT_DBG("%s bad parameters", hdev->name);
+ return;
+ }
+
+ BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
+ ev->num_hndl);
+
+ for (i = 0; i < ev->num_hndl; i++) {
+ struct hci_comp_blocks_info *info = &ev->handles[i];
+ struct hci_conn *conn = NULL;
+ __u16 handle, block_count;
+
+ handle = __le16_to_cpu(info->handle);
+ block_count = __le16_to_cpu(info->blocks);
+
+ conn = __hci_conn_lookup_handle(hdev, handle);
+ if (!conn)
+ continue;
+
+ conn->sent -= block_count;
+
+ switch (conn->type) {
+ case ACL_LINK:
+ case AMP_LINK:
+ hdev->block_cnt += block_count;
+ if (hdev->block_cnt > hdev->num_blocks)
+ hdev->block_cnt = hdev->num_blocks;
+ break;
+
+ default:
+ BT_ERR("Unknown type %d conn %p", conn->type, conn);
+ break;
+ }
+ }
+
+ queue_work(hdev->workqueue, &hdev->tx_work);
+}
+
+static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_mode_change *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
if (conn) {
conn->mode = ev->mode;
- conn->interval = __le16_to_cpu(ev->interval);
- if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
+ if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
+ &conn->flags)) {
if (conn->mode == HCI_CM_ACTIVE)
- conn->power_save = 1;
+ set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
else
- conn->power_save = 0;
+ clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
}
- if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
+ if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
hci_sco_setup(conn, ev->status);
}
hci_dev_unlock(hdev);
}
-static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_pin_code_req *ev = (void *) skb->data;
struct hci_conn *conn;
@@ -1580,24 +3014,103 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
- if (conn && conn->state == BT_CONNECTED) {
+ if (!conn)
+ goto unlock;
+
+ if (conn->state == BT_CONNECTED) {
hci_conn_hold(conn);
conn->disc_timeout = HCI_PAIRING_TIMEOUT;
- hci_conn_put(conn);
+ hci_conn_drop(conn);
}
+ if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
+ hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
+ sizeof(ev->bdaddr), &ev->bdaddr);
+ else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
+ u8 secure;
+
+ if (conn->pending_sec_level == BT_SECURITY_HIGH)
+ secure = 1;
+ else
+ secure = 0;
+
+ mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
+ }
+
+unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
+ struct hci_ev_link_key_req *ev = (void *) skb->data;
+ struct hci_cp_link_key_reply cp;
+ struct hci_conn *conn;
+ struct link_key *key;
+
BT_DBG("%s", hdev->name);
+
+ if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ return;
+
+ hci_dev_lock(hdev);
+
+ key = hci_find_link_key(hdev, &ev->bdaddr);
+ if (!key) {
+ BT_DBG("%s link key not found for %pMR", hdev->name,
+ &ev->bdaddr);
+ goto not_found;
+ }
+
+ BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
+ &ev->bdaddr);
+
+ if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
+ key->type == HCI_LK_DEBUG_COMBINATION) {
+ BT_DBG("%s ignoring debug key", hdev->name);
+ goto not_found;
+ }
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+ if (conn) {
+ if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
+ key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
+ conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
+ BT_DBG("%s ignoring unauthenticated key", hdev->name);
+ goto not_found;
+ }
+
+ if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
+ (conn->pending_sec_level == BT_SECURITY_HIGH ||
+ conn->pending_sec_level == BT_SECURITY_FIPS)) {
+ BT_DBG("%s ignoring key unauthenticated for high security",
+ hdev->name);
+ goto not_found;
+ }
+
+ conn->key_type = key->type;
+ conn->pin_length = key->pin_len;
+ }
+
+ bacpy(&cp.bdaddr, &ev->bdaddr);
+ memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
+
+ hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
+
+ hci_dev_unlock(hdev);
+
+ return;
+
+not_found:
+ hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
+ hci_dev_unlock(hdev);
}
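/*
 * The key-acceptance rules above condensed into one predicate: debug
 * keys only when debug keys are enabled, no unauthenticated keys when
 * the pending authentication requires MITM (auth_wants_mitm should be
 * false while the requirement is still unknown, 0xff in the kernel
 * code), and no combination key derived from a short PIN for HIGH or
 * FIPS security. Names and values are local stand-ins for the HCI_LK_*
 * and BT_SECURITY_* symbols.
 */
#include <stdbool.h>
#include <stdint.h>

enum lk_type {
	LK_COMBINATION		= 0x00,
	LK_DEBUG_COMBINATION	= 0x03,
	LK_UNAUTH_P192		= 0x04,
	LK_UNAUTH_P256		= 0x07,
};

enum sec { SEC_LOW, SEC_MEDIUM, SEC_HIGH, SEC_FIPS };

static bool link_key_usable(enum lk_type type, uint8_t pin_len,
			    bool debug_keys, bool auth_wants_mitm,
			    enum sec pending)
{
	if (type == LK_DEBUG_COMBINATION && !debug_keys)
		return false;
	if ((type == LK_UNAUTH_P192 || type == LK_UNAUTH_P256) &&
	    auth_wants_mitm)
		return false;
	if (type == LK_COMBINATION && pin_len < 16 &&
	    (pending == SEC_HIGH || pending == SEC_FIPS))
		return false;
	return true;
}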
-static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_link_key_notify *ev = (void *) skb->data;
struct hci_conn *conn;
+ u8 pin_len = 0;
BT_DBG("%s", hdev->name);
@@ -1607,18 +3120,27 @@ static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff
if (conn) {
hci_conn_hold(conn);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
- hci_conn_put(conn);
+ pin_len = conn->pin_length;
+
+ if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
+ conn->key_type = ev->key_type;
+
+ hci_conn_drop(conn);
}
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
+ ev->key_type, pin_len);
+
hci_dev_unlock(hdev);
}
-static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_clock_offset *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -1636,12 +3158,12 @@ static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *sk
hci_dev_unlock(hdev);
}
-static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_pkt_type_change *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -1652,7 +3174,7 @@ static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff
hci_dev_unlock(hdev);
}
-static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
struct inquiry_entry *ie;
@@ -1670,22 +3192,28 @@ static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *
hci_dev_unlock(hdev);
}
-static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct inquiry_data data;
int num_rsp = *((__u8 *) skb->data);
+ bool name_known, ssp;
BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
if (!num_rsp)
return;
+ if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
+ return;
+
hci_dev_lock(hdev);
if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
- struct inquiry_info_with_rssi_and_pscan_mode *info = (void *) (skb->data + 1);
+ struct inquiry_info_with_rssi_and_pscan_mode *info;
+ info = (void *) (skb->data + 1);
- for (; num_rsp; num_rsp--) {
+ for (; num_rsp; num_rsp--, info++) {
bacpy(&data.bdaddr, &info->bdaddr);
data.pscan_rep_mode = info->pscan_rep_mode;
data.pscan_period_mode = info->pscan_period_mode;
@@ -1694,13 +3222,17 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
data.clock_offset = info->clock_offset;
data.rssi = info->rssi;
data.ssp_mode = 0x00;
- info++;
- hci_inquiry_cache_update(hdev, &data);
+
+ name_known = hci_inquiry_cache_update(hdev, &data,
+ false, &ssp);
+ mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
+ info->dev_class, info->rssi,
+ !name_known, ssp, NULL, 0, NULL, 0);
}
} else {
struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
- for (; num_rsp; num_rsp--) {
+ for (; num_rsp; num_rsp--, info++) {
bacpy(&data.bdaddr, &info->bdaddr);
data.pscan_rep_mode = info->pscan_rep_mode;
data.pscan_period_mode = info->pscan_period_mode;
@@ -1709,15 +3241,19 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
data.clock_offset = info->clock_offset;
data.rssi = info->rssi;
data.ssp_mode = 0x00;
- info++;
- hci_inquiry_cache_update(hdev, &data);
+ name_known = hci_inquiry_cache_update(hdev, &data,
+ false, &ssp);
+ mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
+ info->dev_class, info->rssi,
+ !name_known, ssp, NULL, 0, NULL, 0);
}
}
hci_dev_unlock(hdev);
}
-static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_remote_ext_features_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_remote_ext_features *ev = (void *) skb->data;
struct hci_conn *conn;
@@ -1730,43 +3266,65 @@ static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_b
if (!conn)
goto unlock;
+ if (ev->page < HCI_MAX_PAGES)
+ memcpy(conn->features[ev->page], ev->features, 8);
+
if (!ev->status && ev->page == 0x01) {
struct inquiry_entry *ie;
ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
if (ie)
- ie->data.ssp_mode = (ev->features[0] & 0x01);
+ ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
+
+ if (ev->features[0] & LMP_HOST_SSP) {
+ set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
+ } else {
+ /* The Bluetooth specification mandates that Extended
+ * Inquiry Results are only used when Secure Simple
+ * Pairing is enabled, but some devices violate this.
+ *
+ * To make these devices work, the internal SSP enabled
+ * flag needs to be cleared if the remote host features
+ * do not indicate SSP support. */

+ clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
+ }
- conn->ssp_mode = (ev->features[0] & 0x01);
+ if (ev->features[0] & LMP_HOST_SC)
+ set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
}
if (conn->state != BT_CONFIG)
goto unlock;
- if (!ev->status) {
+ if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
struct hci_cp_remote_name_req cp;
memset(&cp, 0, sizeof(cp));
bacpy(&cp.bdaddr, &conn->dst);
cp.pscan_rep_mode = 0x02;
hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
- }
+ } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ mgmt_device_connected(hdev, &conn->dst, conn->type,
+ conn->dst_type, 0, NULL, 0,
+ conn->dev_class);
if (!hci_outgoing_auth_needed(hdev, conn)) {
conn->state = BT_CONNECTED;
hci_proto_connect_cfm(conn, ev->status);
- hci_conn_put(conn);
+ hci_conn_drop(conn);
}
unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -1787,19 +3345,20 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu
conn->handle = __le16_to_cpu(ev->handle);
conn->state = BT_CONNECTED;
- hci_conn_hold_device(conn);
hci_conn_add_sysfs(conn);
break;
+ case 0x0d: /* Connection Rejected due to Limited Resources */
case 0x11: /* Unsupported Feature or Parameter Value */
case 0x1c: /* SCO interval rejected */
case 0x1a: /* Unsupported Remote Feature */
case 0x1f: /* Unspecified error */
- if (conn->out && conn->attempt < 2) {
+ case 0x20: /* Unsupported LMP Parameter value */
+ if (conn->out) {
conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
(hdev->esco_type & EDR_ESCO_MASK);
- hci_setup_sync(conn, conn->link->handle);
- goto unlock;
+ if (hci_setup_sync(conn, conn->link->handle))
+ goto unlock;
}
/* fall through */
@@ -1816,57 +3375,140 @@ unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
-{
- BT_DBG("%s", hdev->name);
-}
-
-static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static inline size_t eir_get_length(u8 *eir, size_t eir_len)
{
- struct hci_ev_sniff_subrate *ev = (void *) skb->data;
- struct hci_conn *conn;
+ size_t parsed = 0;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ while (parsed < eir_len) {
+ u8 field_len = eir[0];
- hci_dev_lock(hdev);
+ if (field_len == 0)
+ return parsed;
- conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
- if (conn) {
+ parsed += field_len + 1;
+ eir += field_len + 1;
}
- hci_dev_unlock(hdev);
+ return eir_len;
}
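/*
 * EIR records are length-prefixed: [len][type][len-1 bytes of data],
 * terminated by a zero length. A stand-alone walk in the same style as
 * eir_get_length() above, here returning a pointer to one field; 0x09
 * is the Complete Local Name type in the assigned numbers.
 */
#include <stddef.h>
#include <stdint.h>

static const uint8_t *eir_find(const uint8_t *eir, size_t eir_len,
			       uint8_t type, uint8_t *data_len)
{
	size_t parsed = 0;

	while (parsed + 1 < eir_len) {
		uint8_t field_len = eir[0];

		if (field_len == 0)
			break;			/* terminator */
		if (parsed + field_len + 1 > eir_len)
			break;			/* truncated field */
		if (eir[1] == type) {
			*data_len = field_len - 1;
			return &eir[2];
		}
		parsed += field_len + 1;
		eir += field_len + 1;
	}
	return NULL;
}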
-static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct inquiry_data data;
struct extended_inquiry_info *info = (void *) (skb->data + 1);
int num_rsp = *((__u8 *) skb->data);
+ size_t eir_len;
BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
if (!num_rsp)
return;
+ if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
+ return;
+
hci_dev_lock(hdev);
- for (; num_rsp; num_rsp--) {
+ for (; num_rsp; num_rsp--, info++) {
+ bool name_known, ssp;
+
bacpy(&data.bdaddr, &info->bdaddr);
- data.pscan_rep_mode = info->pscan_rep_mode;
- data.pscan_period_mode = info->pscan_period_mode;
- data.pscan_mode = 0x00;
+ data.pscan_rep_mode = info->pscan_rep_mode;
+ data.pscan_period_mode = info->pscan_period_mode;
+ data.pscan_mode = 0x00;
memcpy(data.dev_class, info->dev_class, 3);
- data.clock_offset = info->clock_offset;
- data.rssi = info->rssi;
+ data.clock_offset = info->clock_offset;
+ data.rssi = info->rssi;
data.ssp_mode = 0x01;
- info++;
- hci_inquiry_cache_update(hdev, &data);
+
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ name_known = eir_has_data_type(info->data,
+ sizeof(info->data),
+ EIR_NAME_COMPLETE);
+ else
+ name_known = true;
+
+ name_known = hci_inquiry_cache_update(hdev, &data, name_known,
+ &ssp);
+ eir_len = eir_get_length(info->data, sizeof(info->data));
+ mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
+ info->dev_class, info->rssi, !name_known,
+ ssp, info->data, eir_len, NULL, 0);
}
hci_dev_unlock(hdev);
}
-static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
+ struct hci_conn *conn;
+
+ BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
+ __le16_to_cpu(ev->handle));
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
+ if (!conn)
+ goto unlock;
+
+ /* For BR/EDR the necessary steps are taken through the
+ * auth_complete event.
+ */
+ if (conn->type != LE_LINK)
+ goto unlock;
+
+ if (!ev->status)
+ conn->sec_level = conn->pending_sec_level;
+
+ clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
+
+ if (ev->status && conn->state == BT_CONNECTED) {
+ hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
+ hci_conn_drop(conn);
+ goto unlock;
+ }
+
+ if (conn->state == BT_CONFIG) {
+ if (!ev->status)
+ conn->state = BT_CONNECTED;
+
+ hci_proto_connect_cfm(conn, ev->status);
+ hci_conn_drop(conn);
+ } else {
+ hci_auth_cfm(conn, ev->status);
+
+ hci_conn_hold(conn);
+ conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+ hci_conn_drop(conn);
+ }
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static u8 hci_get_auth_req(struct hci_conn *conn)
+{
+ /* If the remote requests no-bonding, follow that lead */
+ if (conn->remote_auth == HCI_AT_NO_BONDING ||
+ conn->remote_auth == HCI_AT_NO_BONDING_MITM)
+ return conn->remote_auth | (conn->auth_type & 0x01);
+
+ /* If both remote and local have enough IO capabilities, require
+ * MITM protection
+ */
+ if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
+ conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
+ return conn->remote_auth | 0x01;
+
+ /* No MITM protection possible so ignore remote requirement */
+ return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
+}
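/*
 * Worked examples for the requirement merge above. Bit 0 of the auth
 * value is the MITM flag and the upper bits select the bonding type;
 * the constants mirror the HCI_AT_* and HCI_IO_* values (assumed), and
 * get_auth_req() is a local copy for illustration.
 */
#include <assert.h>
#include <stdint.h>

#define AT_NO_BONDING		0x00
#define AT_NO_BONDING_MITM	0x01
#define AT_GENERAL_BONDING	0x04
#define IO_DISPLAY_YESNO	0x01
#define IO_NO_INPUT_OUTPUT	0x03

static uint8_t get_auth_req(uint8_t remote_auth, uint8_t remote_cap,
			    uint8_t local_auth, uint8_t local_cap)
{
	/* Remote insists on no-bonding: keep only our MITM bit. */
	if (remote_auth == AT_NO_BONDING || remote_auth == AT_NO_BONDING_MITM)
		return remote_auth | (local_auth & 0x01);

	/* Both sides have usable IO: require MITM protection. */
	if (remote_cap != IO_NO_INPUT_OUTPUT && local_cap != IO_NO_INPUT_OUTPUT)
		return remote_auth | 0x01;

	/* MITM is unachievable: drop the remote's MITM bit. */
	return (remote_auth & ~0x01) | (local_auth & 0x01);
}

int main(void)
{
	/* Two DisplayYesNo devices bonding: MITM gets switched on. */
	assert(get_auth_req(AT_GENERAL_BONDING, IO_DISPLAY_YESNO,
			    AT_GENERAL_BONDING, IO_DISPLAY_YESNO) == 0x05);
	/* A NoInputNoOutput peer: MITM is dropped even if it asked. */
	assert(get_auth_req(AT_GENERAL_BONDING | 0x01, IO_NO_INPUT_OUTPUT,
			    AT_GENERAL_BONDING, IO_DISPLAY_YESNO) == 0x04);
	return 0;
}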
+
+static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_io_capa_request *ev = (void *) skb->data;
struct hci_conn *conn;
@@ -1876,13 +3518,229 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
- if (conn)
- hci_conn_hold(conn);
+ if (!conn)
+ goto unlock;
+
+ hci_conn_hold(conn);
+
+ if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ goto unlock;
+
+ if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
+ (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
+ struct hci_cp_io_capability_reply cp;
+
+ bacpy(&cp.bdaddr, &ev->bdaddr);
+ /* Change the IO capability from KeyboardDisplay to
+ * DisplayYesNo, since KeyboardDisplay is not supported
+ * by the BT spec for BR/EDR. */
+ cp.capability = (conn->io_capability == 0x04) ?
+ HCI_IO_DISPLAY_YESNO : conn->io_capability;
+
+ /* If we are initiators, there is no remote information yet */
+ if (conn->remote_auth == 0xff) {
+ cp.authentication = conn->auth_type;
+
+ /* Request MITM protection if our IO caps allow it,
+ * except for the no-bonding case.
+ * conn->auth_type is not updated here, since that
+ * might cause the user confirmation to be rejected
+ * if the remote doesn't have the IO capabilities
+ * for MITM.
+ */
+ if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
+ cp.authentication != HCI_AT_NO_BONDING)
+ cp.authentication |= 0x01;
+ } else {
+ conn->auth_type = hci_get_auth_req(conn);
+ cp.authentication = conn->auth_type;
+ }
+ if (hci_find_remote_oob_data(hdev, &conn->dst) &&
+ (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
+ cp.oob_data = 0x01;
+ else
+ cp.oob_data = 0x00;
+
+ hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
+ sizeof(cp), &cp);
+ } else {
+ struct hci_cp_io_capability_neg_reply cp;
+
+ bacpy(&cp.bdaddr, &ev->bdaddr);
+ cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
+
+ hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
+ sizeof(cp), &cp);
+ }
+
+unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_ev_io_capa_reply *ev = (void *) skb->data;
+ struct hci_conn *conn;
+
+ BT_DBG("%s", hdev->name);
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+ if (!conn)
+ goto unlock;
+
+ conn->remote_cap = ev->capability;
+ conn->remote_auth = ev->authentication;
+ if (ev->oob_data)
+ set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static void hci_user_confirm_request_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_user_confirm_req *ev = (void *) skb->data;
+ int loc_mitm, rem_mitm, confirm_hint = 0;
+ struct hci_conn *conn;
+
+ BT_DBG("%s", hdev->name);
+
+ hci_dev_lock(hdev);
+
+ if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ goto unlock;
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+ if (!conn)
+ goto unlock;
+
+ loc_mitm = (conn->auth_type & 0x01);
+ rem_mitm = (conn->remote_auth & 0x01);
+
+ /* If we require MITM but the remote device can't provide it
+ * (it has NoInputNoOutput), then reject the confirmation
+ * request.
+ */
+ if (loc_mitm && conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
+ BT_DBG("Rejecting request: remote device can't provide MITM");
+ hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
+ sizeof(ev->bdaddr), &ev->bdaddr);
+ goto unlock;
+ }
+
+ /* If no side requires MITM protection, auto-accept */
+ if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
+ (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
+
+ /* If we're not the initiator, request authorization from
+ * user space to proceed (mgmt_user_confirm with
+ * confirm_hint set to 1). The exception is if neither
+ * side requested MITM, in which case we auto-accept.
+ */
+ if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
+ (loc_mitm || rem_mitm)) {
+ BT_DBG("Confirming auto-accept as acceptor");
+ confirm_hint = 1;
+ goto confirm;
+ }
+
+ BT_DBG("Auto-accept of user confirmation with %ums delay",
+ hdev->auto_accept_delay);
+
+ if (hdev->auto_accept_delay > 0) {
+ int delay = msecs_to_jiffies(hdev->auto_accept_delay);
+ queue_delayed_work(conn->hdev->workqueue,
+ &conn->auto_accept_work, delay);
+ goto unlock;
+ }
+
+ hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
+ sizeof(ev->bdaddr), &ev->bdaddr);
+ goto unlock;
+ }
+
+confirm:
+ mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
+ le32_to_cpu(ev->passkey), confirm_hint);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
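/*
 * The just-works decision tree above, reduced to a pure function: what
 * to do with a User Confirmation Request given each side's MITM bit
 * and IO capability. A sketch; the names are local, not kernel
 * symbols, and the delayed-accept path is folded into AUTO_ACCEPT.
 */
#include <stdbool.h>

enum confirm_action { REJECT, AUTO_ACCEPT, CONFIRM_AS_ACCEPTOR, ASK_USER };

static enum confirm_action decide_confirm(bool loc_mitm, bool rem_mitm,
					  bool rem_no_io, bool loc_no_io,
					  bool we_initiated_auth)
{
	/* We need MITM but the peer has no IO to provide it: reject. */
	if (loc_mitm && rem_no_io)
		return REJECT;

	/* Neither side can enforce MITM on the other ... */
	if ((!loc_mitm || rem_no_io) && (!rem_mitm || loc_no_io)) {
		/* ... but if someone asked for it and we are only the
		 * acceptor, let user space authorize the downgrade. */
		if (!we_initiated_auth && (loc_mitm || rem_mitm))
			return CONFIRM_AS_ACCEPTOR;
		return AUTO_ACCEPT;	/* true just-works case */
	}

	return ASK_USER;		/* numeric comparison via mgmt */
}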
+
+static void hci_user_passkey_request_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_user_passkey_req *ev = (void *) skb->data;
+
+ BT_DBG("%s", hdev->name);
+
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
+}
+
+static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
+ struct hci_conn *conn;
+
+ BT_DBG("%s", hdev->name);
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+ if (!conn)
+ return;
+
+ conn->passkey_notify = __le32_to_cpu(ev->passkey);
+ conn->passkey_entered = 0;
+
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
+ conn->dst_type, conn->passkey_notify,
+ conn->passkey_entered);
+}
+
+static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_ev_keypress_notify *ev = (void *) skb->data;
+ struct hci_conn *conn;
+
+ BT_DBG("%s", hdev->name);
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+ if (!conn)
+ return;
+
+ switch (ev->type) {
+ case HCI_KEYPRESS_STARTED:
+ conn->passkey_entered = 0;
+ return;
+
+ case HCI_KEYPRESS_ENTERED:
+ conn->passkey_entered++;
+ break;
+
+ case HCI_KEYPRESS_ERASED:
+ conn->passkey_entered--;
+ break;
+
+ case HCI_KEYPRESS_CLEARED:
+ conn->passkey_entered = 0;
+ break;
+
+ case HCI_KEYPRESS_COMPLETED:
+ return;
+ }
+
+ if (test_bit(HCI_MGMT, &hdev->dev_flags))
+ mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
+ conn->dst_type, conn->passkey_notify,
+ conn->passkey_entered);
+}
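/*
 * The keypress bookkeeping above as a pure update function: returns
 * the new "digits entered" count for an in-progress passkey. The case
 * values mirror the HCI keypress notification types (assumed 0x00 to
 * 0x04); like the code above, no clamping at zero is attempted.
 */
#include <stdint.h>

static int passkey_digits(int cur, uint8_t keypress)
{
	switch (keypress) {
	case 0x00:	/* entry started */
	case 0x03:	/* passkey cleared */
		return 0;
	case 0x01:	/* digit entered */
		return cur + 1;
	case 0x02:	/* digit erased */
		return cur - 1;
	default:	/* 0x04, entry completed: count unchanged */
		return cur;
	}
}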
+
+static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
struct hci_conn *conn;
@@ -1892,35 +3750,578 @@ static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
- if (conn)
- hci_conn_put(conn);
+ if (!conn)
+ goto unlock;
+
+ /* To avoid duplicate auth_failed events to user space, we
+ * check the HCI_CONN_AUTH_PEND flag, which will be set if we
+ * initiated the authentication. A traditional auth_complete
+ * event is always produced when we are the initiator, and is
+ * also mapped to the mgmt_auth_failed event. */
+ if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
+ mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
+ ev->status);
+
+ hci_conn_drop(conn);
+unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_remote_host_features_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_remote_host_features *ev = (void *) skb->data;
struct inquiry_entry *ie;
+ struct hci_conn *conn;
BT_DBG("%s", hdev->name);
hci_dev_lock(hdev);
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+ if (conn)
+ memcpy(conn->features[1], ev->features, 8);
+
ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
if (ie)
- ie->data.ssp_mode = (ev->features[0] & 0x01);
+ ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
+ struct oob_data *data;
+
+ BT_DBG("%s", hdev->name);
+
+ hci_dev_lock(hdev);
+
+ if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ goto unlock;
+
+ data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
+ if (data) {
+ if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
+ struct hci_cp_remote_oob_ext_data_reply cp;
+
+ bacpy(&cp.bdaddr, &ev->bdaddr);
+ memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
+ memcpy(cp.randomizer192, data->randomizer192,
+ sizeof(cp.randomizer192));
+ memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
+ memcpy(cp.randomizer256, data->randomizer256,
+ sizeof(cp.randomizer256));
+
+ hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
+ sizeof(cp), &cp);
+ } else {
+ struct hci_cp_remote_oob_data_reply cp;
+
+ bacpy(&cp.bdaddr, &ev->bdaddr);
+ memcpy(cp.hash, data->hash192, sizeof(cp.hash));
+ memcpy(cp.randomizer, data->randomizer192,
+ sizeof(cp.randomizer));
+
+ hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
+ sizeof(cp), &cp);
+ }
+ } else {
+ struct hci_cp_remote_oob_data_neg_reply cp;
+
+ bacpy(&cp.bdaddr, &ev->bdaddr);
+ hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
+ sizeof(cp), &cp);
+ }
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static void hci_phy_link_complete_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_phy_link_complete *ev = (void *) skb->data;
+ struct hci_conn *hcon, *bredr_hcon;
+
+ BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
+ ev->status);
+
+ hci_dev_lock(hdev);
+
+ hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
+ if (!hcon) {
+ hci_dev_unlock(hdev);
+ return;
+ }
+
+ if (ev->status) {
+ hci_conn_del(hcon);
+ hci_dev_unlock(hdev);
+ return;
+ }
+
+ bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
+
+ hcon->state = BT_CONNECTED;
+ bacpy(&hcon->dst, &bredr_hcon->dst);
+
+ hci_conn_hold(hcon);
+ hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
+ hci_conn_drop(hcon);
+
+ hci_conn_add_sysfs(hcon);
+
+ amp_physical_cfm(bredr_hcon, hcon);
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_ev_logical_link_complete *ev = (void *) skb->data;
+ struct hci_conn *hcon;
+ struct hci_chan *hchan;
+ struct amp_mgr *mgr;
+
+ BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
+ hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
+ ev->status);
+
+ hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
+ if (!hcon)
+ return;
+
+ /* Create AMP hchan */
+ hchan = hci_chan_create(hcon);
+ if (!hchan)
+ return;
+
+ hchan->handle = le16_to_cpu(ev->handle);
+
+ BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
+
+ mgr = hcon->amp_mgr;
+ if (mgr && mgr->bredr_chan) {
+ struct l2cap_chan *bredr_chan = mgr->bredr_chan;
+
+ l2cap_chan_lock(bredr_chan);
+
+ bredr_chan->conn->mtu = hdev->block_mtu;
+ l2cap_logical_cfm(bredr_chan, hchan, 0);
+ hci_conn_hold(hcon);
+
+ l2cap_chan_unlock(bredr_chan);
+ }
+}
+
+static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
+ struct hci_chan *hchan;
+
+ BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
+ le16_to_cpu(ev->handle), ev->status);
+
+ if (ev->status)
+ return;
+
+ hci_dev_lock(hdev);
+
+ hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
+ if (!hchan)
+ goto unlock;
+
+ amp_destroy_logical_link(hchan, ev->reason);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
+ struct hci_conn *hcon;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+
+ if (ev->status)
+ return;
+
+ hci_dev_lock(hdev);
+
+ hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
+ if (hcon) {
+ hcon->state = BT_CLOSED;
+ hci_conn_del(hcon);
+ }
+
+ hci_dev_unlock(hdev);
+}
+
+static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_ev_le_conn_complete *ev = (void *) skb->data;
+ struct hci_conn *conn;
+ struct smp_irk *irk;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+ if (!conn) {
+ conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
+ if (!conn) {
+ BT_ERR("No memory for new connection");
+ goto unlock;
+ }
+
+ conn->dst_type = ev->bdaddr_type;
+
+ if (ev->role == LE_CONN_ROLE_MASTER) {
+ conn->out = true;
+ conn->link_mode |= HCI_LM_MASTER;
+ }
+
+ /* If we didn't have a hci_conn object previously
+ * but we're in the master role, this must be something
+ * initiated using a white list. Since white list based
+ * connections are not "first class citizens" we don't
+ * have full tracking of them. Therefore, we go ahead
+ * with a "best effort" approach of determining the
+ * initiator address based on the HCI_PRIVACY flag.
+ */
+ if (conn->out) {
+ conn->resp_addr_type = ev->bdaddr_type;
+ bacpy(&conn->resp_addr, &ev->bdaddr);
+ if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
+ conn->init_addr_type = ADDR_LE_DEV_RANDOM;
+ bacpy(&conn->init_addr, &hdev->rpa);
+ } else {
+ hci_copy_identity_address(hdev,
+ &conn->init_addr,
+ &conn->init_addr_type);
+ }
+ }
+ } else {
+ cancel_delayed_work(&conn->le_conn_timeout);
+ }
+
+ if (!conn->out) {
+ /* Set the responder (our side) address type based on
+ * the advertising address type.
+ */
+ conn->resp_addr_type = hdev->adv_addr_type;
+ if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
+ bacpy(&conn->resp_addr, &hdev->random_addr);
+ else
+ bacpy(&conn->resp_addr, &hdev->bdaddr);
+ conn->init_addr_type = ev->bdaddr_type;
+ bacpy(&conn->init_addr, &ev->bdaddr);
+ }
+
+ /* Lookup the identity address from the stored connection
+ * address and address type.
+ *
+ * When establishing connections to an identity address, the
+ * connection procedure will store the resolvable random
+ * address first. Now if it can be converted back into the
+ * identity address, start using the identity address from
+ * now on.
+ */
+ irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
+ if (irk) {
+ bacpy(&conn->dst, &irk->bdaddr);
+ conn->dst_type = irk->addr_type;
+ }
+
+ if (ev->status) {
+ hci_le_conn_failed(conn, ev->status);
+ goto unlock;
+ }
+
+ if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ mgmt_device_connected(hdev, &conn->dst, conn->type,
+ conn->dst_type, 0, NULL, 0, NULL);
+
+ conn->sec_level = BT_SECURITY_LOW;
+ conn->handle = __le16_to_cpu(ev->handle);
+ conn->state = BT_CONNECTED;
+
+ if (test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
+ set_bit(HCI_CONN_6LOWPAN, &conn->flags);
+
+ hci_conn_add_sysfs(conn);
+
+ hci_proto_connect_cfm(conn, ev->status);
+
+ hci_pend_le_conn_del(hdev, &conn->dst, conn->dst_type);
+
+unlock:
hci_dev_unlock(hdev);
}
+/* This function requires the caller holds hdev->lock */
+static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
+ u8 addr_type)
+{
+ struct hci_conn *conn;
+ struct smp_irk *irk;
+
+ /* If this is a resolvable address, we should resolve it and then
+	 * update the address and address type variables.
+ */
+ irk = hci_get_irk(hdev, addr, addr_type);
+ if (irk) {
+ addr = &irk->bdaddr;
+ addr_type = irk->addr_type;
+ }
+
+ if (!hci_pend_le_conn_lookup(hdev, addr, addr_type))
+ return;
+
+ conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
+ HCI_AT_NO_BONDING);
+ if (!IS_ERR(conn))
+ return;
+
+ switch (PTR_ERR(conn)) {
+ case -EBUSY:
+ /* If hci_connect() returns -EBUSY it means there is already
+ * an LE connection attempt going on. Since controllers don't
+		 * support more than one connection attempt at a time, we
+ * don't consider this an error case.
+ */
+ break;
+ default:
+ BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
+ }
+}
+
+static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
+ u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
+{
+ struct discovery_state *d = &hdev->discovery;
+ bool match;
+
+ /* Passive scanning shouldn't trigger any device found events */
+ if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
+ if (type == LE_ADV_IND || type == LE_ADV_DIRECT_IND)
+ check_pending_le_conn(hdev, bdaddr, bdaddr_type);
+ return;
+ }
+
+	/* If there's nothing pending, either store the data from this
+ * event or send an immediate device found event if the data
+ * should not be stored for later.
+ */
+ if (!has_pending_adv_report(hdev)) {
+ /* If the report will trigger a SCAN_REQ store it for
+ * later merging.
+ */
+ if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
+ store_pending_adv_report(hdev, bdaddr, bdaddr_type,
+ rssi, data, len);
+ return;
+ }
+
+ mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
+ rssi, 0, 1, data, len, NULL, 0);
+ return;
+ }
+
+ /* Check if the pending report is for the same device as the new one */
+ match = (!bacmp(bdaddr, &d->last_adv_addr) &&
+ bdaddr_type == d->last_adv_addr_type);
+
+ /* If the pending data doesn't match this report or this isn't a
+ * scan response (e.g. we got a duplicate ADV_IND) then force
+ * sending of the pending data.
+ */
+ if (type != LE_ADV_SCAN_RSP || !match) {
+ /* Send out whatever is in the cache, but skip duplicates */
+ if (!match)
+ mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
+ d->last_adv_addr_type, NULL,
+ d->last_adv_rssi, 0, 1,
+ d->last_adv_data,
+ d->last_adv_data_len, NULL, 0);
+
+ /* If the new report will trigger a SCAN_REQ store it for
+ * later merging.
+ */
+ if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
+ store_pending_adv_report(hdev, bdaddr, bdaddr_type,
+ rssi, data, len);
+ return;
+ }
+
+ /* The advertising reports cannot be merged, so clear
+ * the pending report and send out a device found event.
+ */
+ clear_pending_adv_report(hdev);
+ mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
+ rssi, 0, 1, data, len, NULL, 0);
+ return;
+ }
+
+ /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
+ * the new event is a SCAN_RSP. We can therefore proceed with
+ * sending a merged device found event.
+ */
+ mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
+ d->last_adv_addr_type, NULL, rssi, 0, 1, data, len,
+ d->last_adv_data, d->last_adv_data_len);
+ clear_pending_adv_report(hdev);
+}
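
The flushing rules above boil down to one question per incoming report: does it merge with the pending one (i.e. is it the SCAN_RSP answering a pending report from the same address), or does it force the cache to be flushed first? A minimal standalone sketch of that decision, with toy types that are not the kernel's definitions:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum adv_type { ADV_IND, ADV_DIRECT_IND, ADV_SCAN_IND, SCAN_RSP };

struct pending_report {
	bool valid;
	enum adv_type type;
	char addr[18];
};

/* A new report merges only when it is the scan response that
 * answers the pending ADV_IND/ADV_SCAN_IND from the same device. */
static bool can_merge(const struct pending_report *p,
		      enum adv_type type, const char *addr)
{
	if (!p->valid)
		return false;
	return type == SCAN_RSP && !strcmp(addr, p->addr);
}

int main(void)
{
	struct pending_report p = {
		.valid = true, .type = ADV_IND, .addr = "00:11:22:33:44:55",
	};

	/* SCAN_RSP from the same device: merged into one event */
	printf("%d\n", can_merge(&p, SCAN_RSP, "00:11:22:33:44:55"));
	/* duplicate ADV_IND: pending data is flushed out instead */
	printf("%d\n", can_merge(&p, ADV_IND, "00:11:22:33:44:55"));
	return 0;
}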
+
+static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ u8 num_reports = skb->data[0];
+ void *ptr = &skb->data[1];
+
+ hci_dev_lock(hdev);
+
+ while (num_reports--) {
+ struct hci_ev_le_advertising_info *ev = ptr;
+ s8 rssi;
+
+ rssi = ev->data[ev->length];
+ process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
+ ev->bdaddr_type, rssi, ev->data, ev->length);
+
+ ptr += sizeof(*ev) + ev->length + 1;
+ }
+
+ hci_dev_unlock(hdev);
+}
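
Each entry in an LE Advertising Report event is followed by a one-byte RSSI, which is why the loop above advances by sizeof(*ev) + ev->length + 1. A userspace sketch of the same walk over a synthetic buffer; the struct layout mirrors hci_ev_le_advertising_info but is declared locally here:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct adv_info {
	uint8_t evt_type;
	uint8_t bdaddr_type;
	uint8_t bdaddr[6];
	uint8_t length;
	uint8_t data[];		/* followed by one RSSI byte */
};

static void walk_reports(const uint8_t *buf, size_t buf_len)
{
	uint8_t num = buf[0];
	const uint8_t *ptr = buf + 1;
	const uint8_t *end = buf + buf_len;

	while (num-- && ptr + sizeof(struct adv_info) < end) {
		const struct adv_info *ev = (const void *)ptr;
		int8_t rssi = (int8_t)ev->data[ev->length];

		printf("type %d len %d rssi %d\n",
		       ev->evt_type, ev->length, rssi);
		ptr += sizeof(*ev) + ev->length + 1;	/* +1 skips the RSSI */
	}
}

int main(void)
{
	/* one report: ADV_IND, public address, 3 data bytes, RSSI -60 */
	uint8_t buf[] = {
		1,			/* num_reports */
		0, 0,			/* evt_type, bdaddr_type */
		1, 2, 3, 4, 5, 6,	/* bdaddr */
		3, 0xaa, 0xbb, 0xcc,	/* length + data */
		0xc4,			/* RSSI (-60) */
	};

	walk_reports(buf, sizeof(buf));
	return 0;
}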
+
+static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_ev_le_ltk_req *ev = (void *) skb->data;
+ struct hci_cp_le_ltk_reply cp;
+ struct hci_cp_le_ltk_neg_reply neg;
+ struct hci_conn *conn;
+ struct smp_ltk *ltk;
+
+ BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
+ if (conn == NULL)
+ goto not_found;
+
+ ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->out);
+ if (ltk == NULL)
+ goto not_found;
+
+ memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
+ cp.handle = cpu_to_le16(conn->handle);
+
+ if (ltk->authenticated)
+ conn->pending_sec_level = BT_SECURITY_HIGH;
+ else
+ conn->pending_sec_level = BT_SECURITY_MEDIUM;
+
+ conn->enc_key_size = ltk->enc_size;
+
+ hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
+
+ /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
+ * temporary key used to encrypt a connection following
+ * pairing. It is used during the Encrypted Session Setup to
+ * distribute the keys. Later, security can be re-established
+ * using a distributed LTK.
+ */
+ if (ltk->type == HCI_SMP_STK_SLAVE) {
+ list_del(&ltk->list);
+ kfree(ltk);
+ }
+
+ hci_dev_unlock(hdev);
+
+ return;
+
+not_found:
+ neg.handle = ev->handle;
+ hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
+ hci_dev_unlock(hdev);
+}
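
The STK handling deserves emphasis: a slave-side STK is single-use, so handing it to the controller also deletes it, whereas a distributed LTK stays around for later re-encryption. A toy model of that difference (not the kernel's list-based key store):

#include <stdbool.h>
#include <stdio.h>

enum key_type { KEY_LTK, KEY_STK_SLAVE };

struct sec_key {
	enum key_type type;
	bool present;
};

/* Hand the key to the controller; a slave STK is consumed by use. */
static bool use_key(struct sec_key *k)
{
	if (!k->present)
		return false;
	if (k->type == KEY_STK_SLAVE)
		k->present = false;
	return true;
}

int main(void)
{
	struct sec_key stk = { KEY_STK_SLAVE, true };
	struct sec_key ltk = { KEY_LTK, true };

	printf("stk first use: %d\n", use_key(&stk));	/* 1 */
	printf("stk second use: %d\n", use_key(&stk));	/* 0: gone */
	printf("ltk first use: %d\n", use_key(&ltk));	/* 1 */
	printf("ltk second use: %d\n", use_key(&ltk));	/* 1: kept */
	return 0;
}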
+
+static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_ev_le_meta *le_ev = (void *) skb->data;
+
+ skb_pull(skb, sizeof(*le_ev));
+
+ switch (le_ev->subevent) {
+ case HCI_EV_LE_CONN_COMPLETE:
+ hci_le_conn_complete_evt(hdev, skb);
+ break;
+
+ case HCI_EV_LE_ADVERTISING_REPORT:
+ hci_le_adv_report_evt(hdev, skb);
+ break;
+
+ case HCI_EV_LE_LTK_REQ:
+ hci_le_ltk_request_evt(hdev, skb);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_ev_channel_selected *ev = (void *) skb->data;
+ struct hci_conn *hcon;
+
+ BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
+
+ skb_pull(skb, sizeof(*ev));
+
+ hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
+ if (!hcon)
+ return;
+
+ amp_read_loc_assoc_final_data(hdev, hcon);
+}
+
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_event_hdr *hdr = (void *) skb->data;
__u8 event = hdr->evt;
+ hci_dev_lock(hdev);
+
+ /* Received events are (currently) only needed when a request is
+	 * ongoing, so avoid unnecessary memory allocation.
+ */
+ if (hdev->req_status == HCI_REQ_PEND) {
+ kfree_skb(hdev->recv_evt);
+ hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
+ }
+
+ hci_dev_unlock(hdev);
+
skb_pull(skb, HCI_EVENT_HDR_SIZE);
+ if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
+ struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
+ u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
+
+ hci_req_cmd_complete(hdev, opcode, 0);
+ }
+
switch (event) {
case HCI_EV_INQUIRY_COMPLETE:
hci_inquiry_complete_evt(hdev, skb);
@@ -1962,14 +4363,6 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
hci_remote_features_evt(hdev, skb);
break;
- case HCI_EV_REMOTE_VERSION:
- hci_remote_version_evt(hdev, skb);
- break;
-
- case HCI_EV_QOS_SETUP_COMPLETE:
- hci_qos_setup_complete_evt(hdev, skb);
- break;
-
case HCI_EV_CMD_COMPLETE:
hci_cmd_complete_evt(hdev, skb);
break;
@@ -2026,22 +4419,38 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
hci_sync_conn_complete_evt(hdev, skb);
break;
- case HCI_EV_SYNC_CONN_CHANGED:
- hci_sync_conn_changed_evt(hdev, skb);
- break;
-
- case HCI_EV_SNIFF_SUBRATE:
- hci_sniff_subrate_evt(hdev, skb);
- break;
-
case HCI_EV_EXTENDED_INQUIRY_RESULT:
hci_extended_inquiry_result_evt(hdev, skb);
break;
+ case HCI_EV_KEY_REFRESH_COMPLETE:
+ hci_key_refresh_complete_evt(hdev, skb);
+ break;
+
case HCI_EV_IO_CAPA_REQUEST:
hci_io_capa_request_evt(hdev, skb);
break;
+ case HCI_EV_IO_CAPA_REPLY:
+ hci_io_capa_reply_evt(hdev, skb);
+ break;
+
+ case HCI_EV_USER_CONFIRM_REQUEST:
+ hci_user_confirm_request_evt(hdev, skb);
+ break;
+
+ case HCI_EV_USER_PASSKEY_REQUEST:
+ hci_user_passkey_request_evt(hdev, skb);
+ break;
+
+ case HCI_EV_USER_PASSKEY_NOTIFY:
+ hci_user_passkey_notify_evt(hdev, skb);
+ break;
+
+ case HCI_EV_KEYPRESS_NOTIFY:
+ hci_keypress_notify_evt(hdev, skb);
+ break;
+
case HCI_EV_SIMPLE_PAIR_COMPLETE:
hci_simple_pair_complete_evt(hdev, skb);
break;
@@ -2050,39 +4459,43 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
hci_remote_host_features_evt(hdev, skb);
break;
- default:
- BT_DBG("%s event 0x%x", hdev->name, event);
+ case HCI_EV_LE_META:
+ hci_le_meta_evt(hdev, skb);
break;
- }
- kfree_skb(skb);
- hdev->stat.evt_rx++;
-}
+ case HCI_EV_CHANNEL_SELECTED:
+ hci_chan_selected_evt(hdev, skb);
+ break;
-/* Generate internal stack event */
-void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
-{
- struct hci_event_hdr *hdr;
- struct hci_ev_stack_internal *ev;
- struct sk_buff *skb;
+ case HCI_EV_REMOTE_OOB_DATA_REQUEST:
+ hci_remote_oob_data_request_evt(hdev, skb);
+ break;
- skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
- if (!skb)
- return;
+ case HCI_EV_PHY_LINK_COMPLETE:
+ hci_phy_link_complete_evt(hdev, skb);
+ break;
- hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
- hdr->evt = HCI_EV_STACK_INTERNAL;
- hdr->plen = sizeof(*ev) + dlen;
+ case HCI_EV_LOGICAL_LINK_COMPLETE:
+ hci_loglink_complete_evt(hdev, skb);
+ break;
+
+ case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
+ hci_disconn_loglink_complete_evt(hdev, skb);
+ break;
+
+ case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
+ hci_disconn_phylink_complete_evt(hdev, skb);
+ break;
- ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
- ev->type = type;
- memcpy(ev->data, data, dlen);
+ case HCI_EV_NUM_COMP_BLOCKS:
+ hci_num_comp_blocks_evt(hdev, skb);
+ break;
- bt_cb(skb)->incoming = 1;
- __net_timestamp(skb);
+ default:
+ BT_DBG("%s event 0x%2.2x", hdev->name, event);
+ break;
+ }
- bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
- skb->dev = (void *) hdev;
- hci_send_to_sock(hdev, skb);
kfree_skb(skb);
+ hdev->stat.evt_rx++;
}
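
The sent_cmd check near the top of hci_event_packet() is what lets a request declare a non-standard completion event: if the incoming event matches the one recorded for the last sent command, the request is completed with the original opcode instead of waiting for Command Complete. A toy model of that matching; the struct is not kernel code, and the constants are taken from the HCI specification:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sent_cmd {
	uint16_t opcode;
	uint8_t comp_event;	/* 0: completes via Command Complete/Status */
};

static bool completes_request(const struct sent_cmd *cmd, uint8_t event)
{
	return cmd->comp_event && cmd->comp_event == event;
}

int main(void)
{
	/* HCI Inquiry (OGF 0x01, OCF 0x0001) completes on the
	 * Inquiry Complete event (0x01), not on Command Complete. */
	struct sent_cmd cmd = { 0x0401, 0x01 };

	printf("%d\n", completes_request(&cmd, 0x01));	/* 1: request done */
	printf("%d\n", completes_request(&cmd, 0x0e));	/* 0: keep waiting */
	return 0;
}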
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 29827c77f6c..80d25c150a6 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -24,32 +24,14 @@
/* Bluetooth HCI sockets. */
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-#include <linux/compat.h>
-#include <linux/socket.h>
-#include <linux/ioctl.h>
-#include <net/sock.h>
-
-#include <asm/system.h>
-#include <linux/uaccess.h>
+#include <linux/export.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/hci_mon.h>
-static int enable_mgmt;
+static atomic_t monitor_promisc = ATOMIC_INIT(0);
/* ----- HCI socket interface ----- */
@@ -84,17 +66,57 @@ static struct bt_sock_list hci_sk_list = {
.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
+static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
+{
+ struct hci_filter *flt;
+ int flt_type, flt_event;
+
+ /* Apply filter */
+ flt = &hci_pi(sk)->filter;
+
+ if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
+ flt_type = 0;
+ else
+ flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
+
+ if (!test_bit(flt_type, &flt->type_mask))
+ return true;
+
+ /* Extra filter for event packets only */
+ if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
+ return false;
+
+ flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
+
+ if (!hci_test_bit(flt_event, &flt->event_mask))
+ return true;
+
+ /* Check filter only when opcode is set */
+ if (!flt->opcode)
+ return false;
+
+ if (flt_event == HCI_EV_CMD_COMPLETE &&
+ flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
+ return true;
+
+ if (flt_event == HCI_EV_CMD_STATUS &&
+ flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
+ return true;
+
+ return false;
+}
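
is_filtered_packet() is the kernel side of the per-socket filter that raw sockets install with the HCI_FILTER socket option. For reference, a minimal userspace sketch of installing such a filter, assuming the BlueZ libbluetooth headers and helper macros (compile with -lbluetooth):

#include <stdio.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>
#include <bluetooth/hci_lib.h>

int main(void)
{
	struct hci_filter flt;
	int dd = hci_open_dev(0);	/* hci0 */

	if (dd < 0) {
		perror("hci_open_dev");
		return 1;
	}

	/* only event packets, and of those only Command Complete
	 * and Command Status */
	hci_filter_clear(&flt);
	hci_filter_set_ptype(HCI_EVENT_PKT, &flt);
	hci_filter_set_event(EVT_CMD_COMPLETE, &flt);
	hci_filter_set_event(EVT_CMD_STATUS, &flt);

	if (setsockopt(dd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt)) < 0) {
		perror("setsockopt");
		return 1;
	}

	/* everything else is now dropped by is_filtered_packet() */
	hci_close_dev(dd);
	return 0;
}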
+
/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
struct sock *sk;
- struct hlist_node *node;
+ struct sk_buff *skb_copy = NULL;
BT_DBG("hdev %p len %d", hdev, skb->len);
read_lock(&hci_sk_list.lock);
- sk_for_each(sk, node, &hci_sk_list.head) {
- struct hci_filter *flt;
+
+ sk_for_each(sk, &hci_sk_list.head) {
struct sk_buff *nskb;
if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
@@ -104,178 +126,405 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
if (skb->sk == sk)
continue;
- if (bt_cb(skb)->channel != hci_pi(sk)->channel)
+ if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
+ if (is_filtered_packet(sk, skb))
+ continue;
+ } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
+ if (!bt_cb(skb)->incoming)
+ continue;
+ if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
+ bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
+ bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
+ continue;
+ } else {
+ /* Don't send frame to other channel types */
+ continue;
+ }
+
+ if (!skb_copy) {
+ /* Create a private copy with headroom */
+ skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
+ if (!skb_copy)
+ continue;
+
+ /* Put type byte before the data */
+ memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
+ }
+
+ nskb = skb_clone(skb_copy, GFP_ATOMIC);
+ if (!nskb)
continue;
- if (bt_cb(skb)->channel == HCI_CHANNEL_CONTROL)
- goto clone;
+ if (sock_queue_rcv_skb(sk, nskb))
+ kfree_skb(nskb);
+ }
- /* Apply filter */
- flt = &hci_pi(sk)->filter;
+ read_unlock(&hci_sk_list.lock);
+
+ kfree_skb(skb_copy);
+}
+
+/* Send frame to control socket */
+void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
+{
+ struct sock *sk;
+
+ BT_DBG("len %d", skb->len);
+
+ read_lock(&hci_sk_list.lock);
- if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
- 0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS), &flt->type_mask))
+ sk_for_each(sk, &hci_sk_list.head) {
+ struct sk_buff *nskb;
+
+ /* Skip the original socket */
+ if (sk == skip_sk)
+ continue;
+
+ if (sk->sk_state != BT_BOUND)
continue;
- if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
- register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
+ if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
+ continue;
- if (!hci_test_bit(evt, &flt->event_mask))
- continue;
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ continue;
- if (flt->opcode &&
- ((evt == HCI_EV_CMD_COMPLETE &&
- flt->opcode !=
- get_unaligned((__le16 *)(skb->data + 3))) ||
- (evt == HCI_EV_CMD_STATUS &&
- flt->opcode !=
- get_unaligned((__le16 *)(skb->data + 4)))))
+ if (sock_queue_rcv_skb(sk, nskb))
+ kfree_skb(nskb);
+ }
+
+ read_unlock(&hci_sk_list.lock);
+}
+
+/* Send frame to monitor socket */
+void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct sock *sk;
+ struct sk_buff *skb_copy = NULL;
+ __le16 opcode;
+
+ if (!atomic_read(&monitor_promisc))
+ return;
+
+ BT_DBG("hdev %p len %d", hdev, skb->len);
+
+ switch (bt_cb(skb)->pkt_type) {
+ case HCI_COMMAND_PKT:
+ opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
+ break;
+ case HCI_EVENT_PKT:
+ opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
+ break;
+ case HCI_ACLDATA_PKT:
+ if (bt_cb(skb)->incoming)
+ opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
+ else
+ opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
+ break;
+ case HCI_SCODATA_PKT:
+ if (bt_cb(skb)->incoming)
+ opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
+ else
+ opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
+ break;
+ default:
+ return;
+ }
+
+ read_lock(&hci_sk_list.lock);
+
+ sk_for_each(sk, &hci_sk_list.head) {
+ struct sk_buff *nskb;
+
+ if (sk->sk_state != BT_BOUND)
+ continue;
+
+ if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
+ continue;
+
+ if (!skb_copy) {
+ struct hci_mon_hdr *hdr;
+
+ /* Create a private copy with headroom */
+ skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE,
+ GFP_ATOMIC, true);
+ if (!skb_copy)
continue;
+
+ /* Put header before the data */
+ hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
+ hdr->opcode = opcode;
+ hdr->index = cpu_to_le16(hdev->id);
+ hdr->len = cpu_to_le16(skb->len);
}
-clone:
- nskb = skb_clone(skb, GFP_ATOMIC);
+ nskb = skb_clone(skb_copy, GFP_ATOMIC);
if (!nskb)
continue;
- /* Put type byte before the data */
- if (bt_cb(skb)->channel == HCI_CHANNEL_RAW)
- memcpy(skb_push(nskb, 1), &bt_cb(nskb)->pkt_type, 1);
+ if (sock_queue_rcv_skb(sk, nskb))
+ kfree_skb(nskb);
+ }
+
+ read_unlock(&hci_sk_list.lock);
+
+ kfree_skb(skb_copy);
+}
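
Every frame pushed to the monitor channel is prefixed with the 6-byte struct hci_mon_hdr built above: a little-endian opcode, controller index, and payload length. A standalone sketch of that framing; the opcode value 3 for HCI_MON_EVENT_PKT follows hci_mon.h:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pack the 6-byte monitor header, little-endian on the wire. */
static size_t mon_frame(uint8_t *out, uint16_t opcode, uint16_t index,
			const uint8_t *payload, uint16_t len)
{
	out[0] = opcode & 0xff;
	out[1] = opcode >> 8;
	out[2] = index & 0xff;
	out[3] = index >> 8;
	out[4] = len & 0xff;
	out[5] = len >> 8;
	memcpy(out + 6, payload, len);
	return 6 + len;
}

int main(void)
{
	uint8_t frame[64];
	uint8_t evt[] = { 0x0e, 0x04 };		/* truncated HCI event */
	size_t n = mon_frame(frame, 3 /* HCI_MON_EVENT_PKT */, 0,
			     evt, sizeof(evt));

	printf("framed %zu bytes for hci0\n", n);
	return 0;
}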
+
+static void send_monitor_event(struct sk_buff *skb)
+{
+ struct sock *sk;
+
+ BT_DBG("len %d", skb->len);
+
+ read_lock(&hci_sk_list.lock);
+
+ sk_for_each(sk, &hci_sk_list.head) {
+ struct sk_buff *nskb;
+
+ if (sk->sk_state != BT_BOUND)
+ continue;
+
+ if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
+ continue;
+
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ continue;
if (sock_queue_rcv_skb(sk, nskb))
kfree_skb(nskb);
}
+
read_unlock(&hci_sk_list.lock);
}
-static int hci_sock_release(struct socket *sock)
+static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
- struct sock *sk = sock->sk;
- struct hci_dev *hdev;
+ struct hci_mon_hdr *hdr;
+ struct hci_mon_new_index *ni;
+ struct sk_buff *skb;
+ __le16 opcode;
- BT_DBG("sock %p sk %p", sock, sk);
+ switch (event) {
+ case HCI_DEV_REG:
+ skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
- if (!sk)
- return 0;
+ ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
+ ni->type = hdev->dev_type;
+ ni->bus = hdev->bus;
+ bacpy(&ni->bdaddr, &hdev->bdaddr);
+ memcpy(ni->name, hdev->name, 8);
- hdev = hci_pi(sk)->hdev;
+ opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
+ break;
- bt_sock_unlink(&hci_sk_list, sk);
+ case HCI_DEV_UNREG:
+ skb = bt_skb_alloc(0, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
- if (hdev) {
- atomic_dec(&hdev->promisc);
- hci_dev_put(hdev);
+ opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
+ break;
+
+ default:
+ return NULL;
}
- sock_orphan(sk);
+ __net_timestamp(skb);
- skb_queue_purge(&sk->sk_receive_queue);
- skb_queue_purge(&sk->sk_write_queue);
+ hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
+ hdr->opcode = opcode;
+ hdr->index = cpu_to_le16(hdev->id);
+ hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
- sock_put(sk);
- return 0;
+ return skb;
}
-struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
+static void send_monitor_replay(struct sock *sk)
{
- struct list_head *p;
+ struct hci_dev *hdev;
+
+ read_lock(&hci_dev_list_lock);
- list_for_each(p, &hdev->blacklist) {
- struct bdaddr_list *b;
+ list_for_each_entry(hdev, &hci_dev_list, list) {
+ struct sk_buff *skb;
- b = list_entry(p, struct bdaddr_list, list);
+ skb = create_monitor_event(hdev, HCI_DEV_REG);
+ if (!skb)
+ continue;
- if (bacmp(bdaddr, &b->bdaddr) == 0)
- return b;
+ if (sock_queue_rcv_skb(sk, skb))
+ kfree_skb(skb);
}
- return NULL;
+ read_unlock(&hci_dev_list_lock);
}
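
send_monitor_replay() is what makes a freshly bound monitor socket immediately useful: it sees a NEW_INDEX record for every adapter that was registered before it attached. A minimal btmon-style reader, assuming the libbluetooth headers expose the monitor channel constants:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int main(void)
{
	struct sockaddr_hci addr;
	unsigned char buf[HCI_MAX_FRAME_SIZE];
	ssize_t n;
	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.hci_family = AF_BLUETOOTH;
	addr.hci_dev = HCI_DEV_NONE;	/* monitor binds to no device */
	addr.hci_channel = HCI_CHANNEL_MONITOR;

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");		/* needs CAP_NET_RAW */
		return 1;
	}

	/* the first reads are the NEW_INDEX replays for adapters that
	 * already existed, each prefixed with the 6-byte monitor header */
	n = read(fd, buf, sizeof(buf));
	printf("got %zd bytes\n", n);

	close(fd);
	return 0;
}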
-static int hci_blacklist_add(struct hci_dev *hdev, void __user *arg)
+/* Generate internal stack event */
+static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
- bdaddr_t bdaddr;
- struct bdaddr_list *entry;
+ struct hci_event_hdr *hdr;
+ struct hci_ev_stack_internal *ev;
+ struct sk_buff *skb;
- if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
- return -EFAULT;
+ skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
+ if (!skb)
+ return;
- if (bacmp(&bdaddr, BDADDR_ANY) == 0)
- return -EBADF;
+ hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
+ hdr->evt = HCI_EV_STACK_INTERNAL;
+ hdr->plen = sizeof(*ev) + dlen;
- if (hci_blacklist_lookup(hdev, &bdaddr))
- return -EEXIST;
+ ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
+ ev->type = type;
+ memcpy(ev->data, data, dlen);
- entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
+ bt_cb(skb)->incoming = 1;
+ __net_timestamp(skb);
+
+ bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
+ hci_send_to_sock(hdev, skb);
+ kfree_skb(skb);
+}
+
+void hci_sock_dev_event(struct hci_dev *hdev, int event)
+{
+ struct hci_ev_si_device ev;
- bacpy(&entry->bdaddr, &bdaddr);
+ BT_DBG("hdev %s event %d", hdev->name, event);
- list_add(&entry->list, &hdev->blacklist);
+ /* Send event to monitor */
+ if (atomic_read(&monitor_promisc)) {
+ struct sk_buff *skb;
- return 0;
+ skb = create_monitor_event(hdev, event);
+ if (skb) {
+ send_monitor_event(skb);
+ kfree_skb(skb);
+ }
+ }
+
+ /* Send event to sockets */
+ ev.event = event;
+ ev.dev_id = hdev->id;
+ hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
+
+ if (event == HCI_DEV_UNREG) {
+ struct sock *sk;
+
+ /* Detach sockets from device */
+ read_lock(&hci_sk_list.lock);
+ sk_for_each(sk, &hci_sk_list.head) {
+ bh_lock_sock_nested(sk);
+ if (hci_pi(sk)->hdev == hdev) {
+ hci_pi(sk)->hdev = NULL;
+ sk->sk_err = EPIPE;
+ sk->sk_state = BT_OPEN;
+ sk->sk_state_change(sk);
+
+ hci_dev_put(hdev);
+ }
+ bh_unlock_sock(sk);
+ }
+ read_unlock(&hci_sk_list.lock);
+ }
}
-int hci_blacklist_clear(struct hci_dev *hdev)
+static int hci_sock_release(struct socket *sock)
{
- struct list_head *p, *n;
+ struct sock *sk = sock->sk;
+ struct hci_dev *hdev;
- list_for_each_safe(p, n, &hdev->blacklist) {
- struct bdaddr_list *b;
+ BT_DBG("sock %p sk %p", sock, sk);
- b = list_entry(p, struct bdaddr_list, list);
+ if (!sk)
+ return 0;
- list_del(p);
- kfree(b);
+ hdev = hci_pi(sk)->hdev;
+
+ if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
+ atomic_dec(&monitor_promisc);
+
+ bt_sock_unlink(&hci_sk_list, sk);
+
+ if (hdev) {
+ if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
+ mgmt_index_added(hdev);
+ clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
+ hci_dev_close(hdev->id);
+ }
+
+ atomic_dec(&hdev->promisc);
+ hci_dev_put(hdev);
}
+ sock_orphan(sk);
+
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
+
+ sock_put(sk);
return 0;
}
-static int hci_blacklist_del(struct hci_dev *hdev, void __user *arg)
+static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
bdaddr_t bdaddr;
- struct bdaddr_list *entry;
+ int err;
if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
return -EFAULT;
- if (bacmp(&bdaddr, BDADDR_ANY) == 0)
- return hci_blacklist_clear(hdev);
+ hci_dev_lock(hdev);
- entry = hci_blacklist_lookup(hdev, &bdaddr);
- if (!entry)
- return -ENOENT;
+ err = hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR);
- list_del(&entry->list);
- kfree(entry);
+ hci_dev_unlock(hdev);
- return 0;
+ return err;
+}
+
+static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
+{
+ bdaddr_t bdaddr;
+ int err;
+
+ if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
+ return -EFAULT;
+
+ hci_dev_lock(hdev);
+
+ err = hci_blacklist_del(hdev, &bdaddr, BDADDR_BREDR);
+
+ hci_dev_unlock(hdev);
+
+ return err;
}
/* Ioctls that require bound socket */
-static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
+static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
+ unsigned long arg)
{
struct hci_dev *hdev = hci_pi(sk)->hdev;
if (!hdev)
return -EBADFD;
+ if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
+ return -EBUSY;
+
+ if (hdev->dev_type != HCI_BREDR)
+ return -EOPNOTSUPP;
+
switch (cmd) {
case HCISETRAW:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
-
- if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
return -EPERM;
-
- if (arg)
- set_bit(HCI_RAW, &hdev->flags);
- else
- clear_bit(HCI_RAW, &hdev->flags);
-
- return 0;
+ return -EOPNOTSUPP;
case HCIGETCONNINFO:
return hci_get_conn_info(hdev, (void __user *) arg);
@@ -285,29 +534,36 @@ static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsign
case HCIBLOCKADDR:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
- return hci_blacklist_add(hdev, (void __user *) arg);
+ return -EPERM;
+ return hci_sock_blacklist_add(hdev, (void __user *) arg);
case HCIUNBLOCKADDR:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
- return hci_blacklist_del(hdev, (void __user *) arg);
-
- default:
- if (hdev->ioctl)
- return hdev->ioctl(hdev, cmd, arg);
- return -EINVAL;
+ return -EPERM;
+ return hci_sock_blacklist_del(hdev, (void __user *) arg);
}
+
+ return -ENOIOCTLCMD;
}
-static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
{
- struct sock *sk = sock->sk;
void __user *argp = (void __user *) arg;
+ struct sock *sk = sock->sk;
int err;
BT_DBG("cmd %x arg %lx", cmd, arg);
+ lock_sock(sk);
+
+ if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
+ err = -EBADFD;
+ goto done;
+ }
+
+ release_sock(sk);
+
switch (cmd) {
case HCIGETDEVLIST:
return hci_get_dev_list(argp);
@@ -320,22 +576,22 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long a
case HCIDEVUP:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
return hci_dev_open(arg);
case HCIDEVDOWN:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
return hci_dev_close(arg);
case HCIDEVRESET:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
return hci_dev_reset(arg);
case HCIDEVRESTAT:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
return hci_dev_reset_stat(arg);
case HCISETSCAN:
@@ -347,21 +603,24 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long a
case HCISETACLMTU:
case HCISETSCOMTU:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
return hci_dev_cmd(cmd, argp);
case HCIINQUIRY:
return hci_inquiry(argp);
-
- default:
- lock_sock(sk);
- err = hci_sock_bound_ioctl(sk, cmd, arg);
- release_sock(sk);
- return err;
}
+
+ lock_sock(sk);
+
+ err = hci_sock_bound_ioctl(sk, cmd, arg);
+
+done:
+ release_sock(sk);
+ return err;
}
-static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
+ int addr_len)
{
struct sockaddr_hci haddr;
struct sock *sk = sock->sk;
@@ -380,31 +639,120 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
if (haddr.hci_family != AF_BLUETOOTH)
return -EINVAL;
- if (haddr.hci_channel > HCI_CHANNEL_CONTROL)
- return -EINVAL;
-
- if (haddr.hci_channel == HCI_CHANNEL_CONTROL && !enable_mgmt)
- return -EINVAL;
-
lock_sock(sk);
- if (sk->sk_state == BT_BOUND || hci_pi(sk)->hdev) {
+ if (sk->sk_state == BT_BOUND) {
err = -EALREADY;
goto done;
}
- if (haddr.hci_dev != HCI_DEV_NONE) {
+ switch (haddr.hci_channel) {
+ case HCI_CHANNEL_RAW:
+ if (hci_pi(sk)->hdev) {
+ err = -EALREADY;
+ goto done;
+ }
+
+ if (haddr.hci_dev != HCI_DEV_NONE) {
+ hdev = hci_dev_get(haddr.hci_dev);
+ if (!hdev) {
+ err = -ENODEV;
+ goto done;
+ }
+
+ atomic_inc(&hdev->promisc);
+ }
+
+ hci_pi(sk)->hdev = hdev;
+ break;
+
+ case HCI_CHANNEL_USER:
+ if (hci_pi(sk)->hdev) {
+ err = -EALREADY;
+ goto done;
+ }
+
+ if (haddr.hci_dev == HCI_DEV_NONE) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (!capable(CAP_NET_ADMIN)) {
+ err = -EPERM;
+ goto done;
+ }
+
hdev = hci_dev_get(haddr.hci_dev);
if (!hdev) {
err = -ENODEV;
goto done;
}
+ if (test_bit(HCI_UP, &hdev->flags) ||
+ test_bit(HCI_INIT, &hdev->flags) ||
+ test_bit(HCI_SETUP, &hdev->dev_flags)) {
+ err = -EBUSY;
+ hci_dev_put(hdev);
+ goto done;
+ }
+
+ if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+ err = -EUSERS;
+ hci_dev_put(hdev);
+ goto done;
+ }
+
+ mgmt_index_removed(hdev);
+
+ err = hci_dev_open(hdev->id);
+ if (err) {
+ clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
+ mgmt_index_added(hdev);
+ hci_dev_put(hdev);
+ goto done;
+ }
+
atomic_inc(&hdev->promisc);
+
+ hci_pi(sk)->hdev = hdev;
+ break;
+
+ case HCI_CHANNEL_CONTROL:
+ if (haddr.hci_dev != HCI_DEV_NONE) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (!capable(CAP_NET_ADMIN)) {
+ err = -EPERM;
+ goto done;
+ }
+
+ break;
+
+ case HCI_CHANNEL_MONITOR:
+ if (haddr.hci_dev != HCI_DEV_NONE) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (!capable(CAP_NET_RAW)) {
+ err = -EPERM;
+ goto done;
+ }
+
+ send_monitor_replay(sk);
+
+ atomic_inc(&monitor_promisc);
+ break;
+
+ default:
+ err = -EINVAL;
+ goto done;
}
+
hci_pi(sk)->channel = haddr.hci_channel;
- hci_pi(sk)->hdev = hdev;
sk->sk_state = BT_BOUND;
done:
@@ -412,34 +760,46 @@ done:
return err;
}
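
For the new user channel the bind() call is the whole handshake: the device must be down, the caller needs CAP_NET_ADMIN, and on success the adapter disappears from the management interface until the socket is closed. A minimal sketch of claiming hci0 this way (HCI_CHANNEL_USER is 1 in the uapi header):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int main(void)
{
	struct sockaddr_hci addr;
	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.hci_family = AF_BLUETOOTH;
	addr.hci_dev = 0;			/* hci0, must be down */
	addr.hci_channel = HCI_CHANNEL_USER;

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");			/* EBUSY if the device is up */
		return 1;
	}

	/* exclusive access: raw commands/ACL/SCO flow over this fd and
	 * mgmt_index_removed() has hidden the adapter from bluetoothd */
	close(fd);
	return 0;
}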
-static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
+static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
+ int *addr_len, int peer)
{
struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
struct sock *sk = sock->sk;
- struct hci_dev *hdev = hci_pi(sk)->hdev;
+ struct hci_dev *hdev;
+ int err = 0;
BT_DBG("sock %p sk %p", sock, sk);
- if (!hdev)
- return -EBADFD;
+ if (peer)
+ return -EOPNOTSUPP;
lock_sock(sk);
+ hdev = hci_pi(sk)->hdev;
+ if (!hdev) {
+ err = -EBADFD;
+ goto done;
+ }
+
*addr_len = sizeof(*haddr);
haddr->hci_family = AF_BLUETOOTH;
haddr->hci_dev = hdev->id;
+	haddr->hci_channel = hci_pi(sk)->channel;
+done:
release_sock(sk);
- return 0;
+ return err;
}
-static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
+static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
+ struct sk_buff *skb)
{
__u32 mask = hci_pi(sk)->cmsg_mask;
if (mask & HCI_CMSG_DIR) {
int incoming = bt_cb(skb)->incoming;
- put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming);
+ put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
+ &incoming);
}
if (mask & HCI_CMSG_TSTAMP) {
@@ -455,7 +815,8 @@ static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_
data = &tv;
len = sizeof(tv);
#ifdef CONFIG_COMPAT
- if (msg->msg_flags & MSG_CMSG_COMPAT) {
+ if (!COMPAT_USE_64BIT_TIME &&
+ (msg->msg_flags & MSG_CMSG_COMPAT)) {
ctv.tv_sec = tv.tv_sec;
ctv.tv_usec = tv.tv_usec;
data = &ctv;
@@ -468,7 +829,7 @@ static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_
}
static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len, int flags)
+ struct msghdr *msg, size_t len, int flags)
{
int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
@@ -487,8 +848,6 @@ static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
if (!skb)
return err;
- msg->msg_namelen = 0;
-
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
@@ -498,7 +857,16 @@ static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
skb_reset_transport_header(skb);
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
- hci_sock_cmsg(sk, msg, skb);
+ switch (hci_pi(sk)->channel) {
+ case HCI_CHANNEL_RAW:
+ hci_sock_cmsg(sk, msg, skb);
+ break;
+ case HCI_CHANNEL_USER:
+ case HCI_CHANNEL_CONTROL:
+ case HCI_CHANNEL_MONITOR:
+ sock_recv_timestamp(msg, sk, skb);
+ break;
+ }
skb_free_datagram(sk, skb);
@@ -528,10 +896,14 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
switch (hci_pi(sk)->channel) {
case HCI_CHANNEL_RAW:
+ case HCI_CHANNEL_USER:
break;
case HCI_CHANNEL_CONTROL:
err = mgmt_control(sk, msg, len);
goto done;
+ case HCI_CHANNEL_MONITOR:
+ err = -EOPNOTSUPP;
+ goto done;
default:
err = -EINVAL;
goto done;
@@ -559,26 +931,46 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
skb_pull(skb, 1);
- skb->dev = (void *) hdev;
- if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
+ if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
+ /* No permission check is needed for user channel
+ * since that gets enforced when binding the socket.
+ *
+		 * However, check that the packet type is valid.
+ */
+ if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
+ bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
+ bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
+ err = -EINVAL;
+ goto drop;
+ }
+
+ skb_queue_tail(&hdev->raw_q, skb);
+ queue_work(hdev->workqueue, &hdev->tx_work);
+ } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
u16 opcode = get_unaligned_le16(skb->data);
u16 ogf = hci_opcode_ogf(opcode);
u16 ocf = hci_opcode_ocf(opcode);
if (((ogf > HCI_SFLT_MAX_OGF) ||
- !hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])) &&
- !capable(CAP_NET_RAW)) {
+ !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
+ &hci_sec_filter.ocf_mask[ogf])) &&
+ !capable(CAP_NET_RAW)) {
err = -EPERM;
goto drop;
}
if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
skb_queue_tail(&hdev->raw_q, skb);
- tasklet_schedule(&hdev->tx_task);
+ queue_work(hdev->workqueue, &hdev->tx_work);
} else {
+			/* Stand-alone HCI commands must be flagged as
+ * single-command requests.
+ */
+ bt_cb(skb)->req.start = true;
+
skb_queue_tail(&hdev->cmd_q, skb);
- tasklet_schedule(&hdev->cmd_task);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
}
} else {
if (!capable(CAP_NET_RAW)) {
@@ -587,7 +979,7 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
}
skb_queue_tail(&hdev->raw_q, skb);
- tasklet_schedule(&hdev->tx_task);
+ queue_work(hdev->workqueue, &hdev->tx_work);
}
err = len;
@@ -601,7 +993,8 @@ drop:
goto done;
}
-static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len)
+static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, unsigned int len)
{
struct hci_ufilter uf = { .opcode = 0 };
struct sock *sk = sock->sk;
@@ -611,6 +1004,11 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char
lock_sock(sk);
+ if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
+ err = -EBADFD;
+ goto done;
+ }
+
switch (optname) {
case HCI_DATA_DIR:
if (get_user(opt, (int __user *)optval)) {
@@ -673,19 +1071,30 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char
break;
}
+done:
release_sock(sk);
return err;
}
-static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
+static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
{
struct hci_ufilter uf;
struct sock *sk = sock->sk;
- int len, opt;
+ int len, opt, err = 0;
+
+ BT_DBG("sk %p, opt %d", sk, optname);
if (get_user(len, optlen))
return -EFAULT;
+ lock_sock(sk);
+
+ if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
+ err = -EBADFD;
+ goto done;
+ }
+
switch (optname) {
case HCI_DATA_DIR:
if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
@@ -694,7 +1103,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char
opt = 0;
if (put_user(opt, optval))
- return -EFAULT;
+ err = -EFAULT;
break;
case HCI_TIME_STAMP:
@@ -704,13 +1113,14 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char
opt = 0;
if (put_user(opt, optval))
- return -EFAULT;
+ err = -EFAULT;
break;
case HCI_FILTER:
{
struct hci_filter *f = &hci_pi(sk)->filter;
+ memset(&uf, 0, sizeof(uf));
uf.type_mask = f->type_mask;
uf.opcode = f->opcode;
uf.event_mask[0] = *((u32 *) f->event_mask + 0);
@@ -719,15 +1129,17 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char
len = min_t(unsigned int, len, sizeof(uf));
if (copy_to_user(optval, &uf, len))
- return -EFAULT;
+ err = -EFAULT;
break;
default:
- return -ENOPROTOOPT;
+ err = -ENOPROTOOPT;
break;
}
- return 0;
+done:
+ release_sock(sk);
+ return err;
}
static const struct proto_ops hci_sock_ops = {
@@ -785,54 +1197,12 @@ static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
return 0;
}
-static int hci_sock_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
-{
- struct hci_dev *hdev = (struct hci_dev *) ptr;
- struct hci_ev_si_device ev;
-
- BT_DBG("hdev %s event %ld", hdev->name, event);
-
- /* Send event to sockets */
- ev.event = event;
- ev.dev_id = hdev->id;
- hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
-
- if (event == HCI_DEV_UNREG) {
- struct sock *sk;
- struct hlist_node *node;
-
- /* Detach sockets from device */
- read_lock(&hci_sk_list.lock);
- sk_for_each(sk, node, &hci_sk_list.head) {
- local_bh_disable();
- bh_lock_sock_nested(sk);
- if (hci_pi(sk)->hdev == hdev) {
- hci_pi(sk)->hdev = NULL;
- sk->sk_err = EPIPE;
- sk->sk_state = BT_OPEN;
- sk->sk_state_change(sk);
-
- hci_dev_put(hdev);
- }
- bh_unlock_sock(sk);
- local_bh_enable();
- }
- read_unlock(&hci_sk_list.lock);
- }
-
- return NOTIFY_DONE;
-}
-
static const struct net_proto_family hci_sock_family_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.create = hci_sock_create,
};
-static struct notifier_block hci_sock_nblock = {
- .notifier_call = hci_sock_dev_event
-};
-
int __init hci_sock_init(void)
{
int err;
@@ -842,30 +1212,30 @@ int __init hci_sock_init(void)
return err;
err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
- if (err < 0)
+ if (err < 0) {
+ BT_ERR("HCI socket registration failed");
goto error;
+ }
- hci_register_notifier(&hci_sock_nblock);
+ err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
+ if (err < 0) {
+ BT_ERR("Failed to create HCI proc file");
+ bt_sock_unregister(BTPROTO_HCI);
+ goto error;
+ }
BT_INFO("HCI socket layer initialized");
return 0;
error:
- BT_ERR("HCI socket registration failed");
proto_unregister(&hci_sk_proto);
return err;
}
-void __exit hci_sock_cleanup(void)
+void hci_sock_cleanup(void)
{
- if (bt_sock_unregister(BTPROTO_HCI) < 0)
- BT_ERR("HCI socket unregistration failed");
-
- hci_unregister_notifier(&hci_sock_nblock);
-
+ bt_procfs_cleanup(&init_net, "hci");
+ bt_sock_unregister(BTPROTO_HCI);
proto_unregister(&hci_sk_proto);
}
-
-module_param(enable_mgmt, bool, 0644);
-MODULE_PARM_DESC(enable_mgmt, "Enable Management interface");
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 5fce3d6d07b..555982a78a5 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -1,19 +1,12 @@
/* Bluetooth HCI driver model support. */
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
+#include <linux/module.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
static struct class *bt_class;
-struct dentry *bt_debugfs = NULL;
-EXPORT_SYMBOL_GPL(bt_debugfs);
-
static inline char *link_typetostr(int type)
{
switch (type) {
@@ -23,61 +16,45 @@ static inline char *link_typetostr(int type)
return "SCO";
case ESCO_LINK:
return "eSCO";
+ case LE_LINK:
+ return "LE";
default:
return "UNKNOWN";
}
}
-static ssize_t show_link_type(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_link_type(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct hci_conn *conn = dev_get_drvdata(dev);
+ struct hci_conn *conn = to_hci_conn(dev);
return sprintf(buf, "%s\n", link_typetostr(conn->type));
}
-static ssize_t show_link_address(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct hci_conn *conn = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", batostr(&conn->dst));
-}
-
-static ssize_t show_link_features(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_link_address(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct hci_conn *conn = dev_get_drvdata(dev);
-
- return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- conn->features[0], conn->features[1],
- conn->features[2], conn->features[3],
- conn->features[4], conn->features[5],
- conn->features[6], conn->features[7]);
+ struct hci_conn *conn = to_hci_conn(dev);
+ return sprintf(buf, "%pMR\n", &conn->dst);
}
-#define LINK_ATTR(_name,_mode,_show,_store) \
-struct device_attribute link_attr_##_name = __ATTR(_name,_mode,_show,_store)
+#define LINK_ATTR(_name, _mode, _show, _store) \
+struct device_attribute link_attr_##_name = __ATTR(_name, _mode, _show, _store)
static LINK_ATTR(type, S_IRUGO, show_link_type, NULL);
static LINK_ATTR(address, S_IRUGO, show_link_address, NULL);
-static LINK_ATTR(features, S_IRUGO, show_link_features, NULL);
static struct attribute *bt_link_attrs[] = {
&link_attr_type.attr,
&link_attr_address.attr,
- &link_attr_features.attr,
NULL
};
-static struct attribute_group bt_link_group = {
- .attrs = bt_link_attrs,
-};
-
-static const struct attribute_group *bt_link_groups[] = {
- &bt_link_group,
- NULL
-};
+ATTRIBUTE_GROUPS(bt_link);
static void bt_link_release(struct device *dev)
{
- void *data = dev_get_drvdata(dev);
- kfree(data);
+ struct hci_conn *conn = to_hci_conn(dev);
+ kfree(conn);
}
static struct device_type bt_link = {
@@ -86,23 +63,6 @@ static struct device_type bt_link = {
.release = bt_link_release,
};
-static void add_conn(struct work_struct *work)
-{
- struct hci_conn *conn = container_of(work, struct hci_conn, work_add);
- struct hci_dev *hdev = conn->hdev;
-
- dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
-
- dev_set_drvdata(&conn->dev, conn);
-
- if (device_add(&conn->dev) < 0) {
- BT_ERR("Failed to register connection device");
- return;
- }
-
- hci_dev_hold(hdev);
-}
-
/*
 * The rfcomm tty device may be retained even when the connection
 * is down, and sysfs doesn't support moving a zombie device,
@@ -113,30 +73,6 @@ static int __match_tty(struct device *dev, void *data)
return !strncmp(dev_name(dev), "rfcomm", 6);
}
-static void del_conn(struct work_struct *work)
-{
- struct hci_conn *conn = container_of(work, struct hci_conn, work_del);
- struct hci_dev *hdev = conn->hdev;
-
- if (!device_is_registered(&conn->dev))
- return;
-
- while (1) {
- struct device *dev;
-
- dev = device_find_child(&conn->dev, NULL, __match_tty);
- if (!dev)
- break;
- device_move(dev, NULL, DPM_ORDER_DEV_LAST);
- put_device(dev);
- }
-
- device_del(&conn->dev);
- put_device(&conn->dev);
-
- hci_dev_put(hdev);
-}
-
void hci_conn_init_sysfs(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
@@ -148,45 +84,44 @@ void hci_conn_init_sysfs(struct hci_conn *conn)
conn->dev.parent = &hdev->dev;
device_initialize(&conn->dev);
-
- INIT_WORK(&conn->work_add, add_conn);
- INIT_WORK(&conn->work_del, del_conn);
}
void hci_conn_add_sysfs(struct hci_conn *conn)
{
+ struct hci_dev *hdev = conn->hdev;
+
BT_DBG("conn %p", conn);
- queue_work(conn->hdev->workqueue, &conn->work_add);
+ dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
+
+ if (device_add(&conn->dev) < 0) {
+ BT_ERR("Failed to register connection device");
+ return;
+ }
+
+ hci_dev_hold(hdev);
}
void hci_conn_del_sysfs(struct hci_conn *conn)
{
- BT_DBG("conn %p", conn);
+ struct hci_dev *hdev = conn->hdev;
- queue_work(conn->hdev->workqueue, &conn->work_del);
-}
+ if (!device_is_registered(&conn->dev))
+ return;
-static inline char *host_bustostr(int bus)
-{
- switch (bus) {
- case HCI_VIRTUAL:
- return "VIRTUAL";
- case HCI_USB:
- return "USB";
- case HCI_PCCARD:
- return "PCCARD";
- case HCI_UART:
- return "UART";
- case HCI_RS232:
- return "RS232";
- case HCI_PCI:
- return "PCI";
- case HCI_SDIO:
- return "SDIO";
- default:
- return "UNKNOWN";
+ while (1) {
+ struct device *dev;
+
+ dev = device_find_child(&conn->dev, NULL, __match_tty);
+ if (!dev)
+ break;
+ device_move(dev, NULL, DPM_ORDER_DEV_LAST);
+ put_device(dev);
}
+
+ device_del(&conn->dev);
+
+ hci_dev_put(hdev);
}
static inline char *host_typetostr(int type)
@@ -201,191 +136,52 @@ static inline char *host_typetostr(int type)
}
}
-static ssize_t show_bus(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_type(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", host_bustostr(hdev->bus));
-}
-
-static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct hci_dev *hdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type));
}
-static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
- char name[249];
+ struct hci_dev *hdev = to_hci_dev(dev);
+ char name[HCI_MAX_NAME_LENGTH + 1];
int i;
- for (i = 0; i < 248; i++)
+ for (i = 0; i < HCI_MAX_NAME_LENGTH; i++)
name[i] = hdev->dev_name[i];
- name[248] = '\0';
+ name[HCI_MAX_NAME_LENGTH] = '\0';
return sprintf(buf, "%s\n", name);
}
-static ssize_t show_class(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct hci_dev *hdev = dev_get_drvdata(dev);
- return sprintf(buf, "0x%.2x%.2x%.2x\n",
- hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
-}
-
-static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct hci_dev *hdev = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", batostr(&hdev->bdaddr));
-}
-
-static ssize_t show_features(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct hci_dev *hdev = dev_get_drvdata(dev);
-
- return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- hdev->features[0], hdev->features[1],
- hdev->features[2], hdev->features[3],
- hdev->features[4], hdev->features[5],
- hdev->features[6], hdev->features[7]);
-}
-
-static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct hci_dev *hdev = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", hdev->manufacturer);
-}
-
-static ssize_t show_hci_version(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct hci_dev *hdev = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", hdev->hci_ver);
-}
-
-static ssize_t show_hci_revision(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct hci_dev *hdev = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", hdev->hci_rev);
-}
-
-static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct hci_dev *hdev = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", hdev->idle_timeout);
-}
-
-static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
- struct hci_dev *hdev = dev_get_drvdata(dev);
- unsigned long val;
-
- if (strict_strtoul(buf, 0, &val) < 0)
- return -EINVAL;
-
- if (val != 0 && (val < 500 || val > 3600000))
- return -EINVAL;
-
- hdev->idle_timeout = val;
-
- return count;
-}
-
-static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_address(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct hci_dev *hdev = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", hdev->sniff_max_interval);
+ struct hci_dev *hdev = to_hci_dev(dev);
+ return sprintf(buf, "%pMR\n", &hdev->bdaddr);
}
-static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
- struct hci_dev *hdev = dev_get_drvdata(dev);
- unsigned long val;
-
- if (strict_strtoul(buf, 0, &val) < 0)
- return -EINVAL;
-
- if (val < 0x0002 || val > 0xFFFE || val % 2)
- return -EINVAL;
-
- if (val < hdev->sniff_min_interval)
- return -EINVAL;
-
- hdev->sniff_max_interval = val;
-
- return count;
-}
-
-static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct hci_dev *hdev = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", hdev->sniff_min_interval);
-}
-
-static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
- struct hci_dev *hdev = dev_get_drvdata(dev);
- unsigned long val;
-
- if (strict_strtoul(buf, 0, &val) < 0)
- return -EINVAL;
-
- if (val < 0x0002 || val > 0xFFFE || val % 2)
- return -EINVAL;
-
- if (val > hdev->sniff_max_interval)
- return -EINVAL;
-
- hdev->sniff_min_interval = val;
-
- return count;
-}
-
-static DEVICE_ATTR(bus, S_IRUGO, show_bus, NULL);
static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-static DEVICE_ATTR(class, S_IRUGO, show_class, NULL);
static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
-static DEVICE_ATTR(features, S_IRUGO, show_features, NULL);
-static DEVICE_ATTR(manufacturer, S_IRUGO, show_manufacturer, NULL);
-static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL);
-static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL);
-
-static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR,
- show_idle_timeout, store_idle_timeout);
-static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR,
- show_sniff_max_interval, store_sniff_max_interval);
-static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR,
- show_sniff_min_interval, store_sniff_min_interval);
static struct attribute *bt_host_attrs[] = {
- &dev_attr_bus.attr,
&dev_attr_type.attr,
&dev_attr_name.attr,
- &dev_attr_class.attr,
&dev_attr_address.attr,
- &dev_attr_features.attr,
- &dev_attr_manufacturer.attr,
- &dev_attr_hci_version.attr,
- &dev_attr_hci_revision.attr,
- &dev_attr_idle_timeout.attr,
- &dev_attr_sniff_max_interval.attr,
- &dev_attr_sniff_min_interval.attr,
NULL
};
-static struct attribute_group bt_host_group = {
- .attrs = bt_host_attrs,
-};
-
-static const struct attribute_group *bt_host_groups[] = {
- &bt_host_group,
- NULL
-};
+ATTRIBUTE_GROUPS(bt_host);
static void bt_host_release(struct device *dev)
{
- void *data = dev_get_drvdata(dev);
- kfree(data);
+ struct hci_dev *hdev = to_hci_dev(dev);
+ kfree(hdev);
+ module_put(THIS_MODULE);
}
static struct device_type bt_host = {
@@ -394,131 +190,25 @@ static struct device_type bt_host = {
.release = bt_host_release,
};
-static int inquiry_cache_show(struct seq_file *f, void *p)
-{
- struct hci_dev *hdev = f->private;
- struct inquiry_cache *cache = &hdev->inq_cache;
- struct inquiry_entry *e;
-
- hci_dev_lock_bh(hdev);
-
- for (e = cache->list; e; e = e->next) {
- struct inquiry_data *data = &e->data;
- seq_printf(f, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
- batostr(&data->bdaddr),
- data->pscan_rep_mode, data->pscan_period_mode,
- data->pscan_mode, data->dev_class[2],
- data->dev_class[1], data->dev_class[0],
- __le16_to_cpu(data->clock_offset),
- data->rssi, data->ssp_mode, e->timestamp);
- }
-
- hci_dev_unlock_bh(hdev);
-
- return 0;
-}
-
-static int inquiry_cache_open(struct inode *inode, struct file *file)
-{
- return single_open(file, inquiry_cache_show, inode->i_private);
-}
-
-static const struct file_operations inquiry_cache_fops = {
- .open = inquiry_cache_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int blacklist_show(struct seq_file *f, void *p)
-{
- struct hci_dev *hdev = f->private;
- struct list_head *l;
-
- hci_dev_lock_bh(hdev);
-
- list_for_each(l, &hdev->blacklist) {
- struct bdaddr_list *b;
-
- b = list_entry(l, struct bdaddr_list, list);
-
- seq_printf(f, "%s\n", batostr(&b->bdaddr));
- }
-
- hci_dev_unlock_bh(hdev);
-
- return 0;
-}
-
-static int blacklist_open(struct inode *inode, struct file *file)
-{
- return single_open(file, blacklist_show, inode->i_private);
-}
-
-static const struct file_operations blacklist_fops = {
- .open = blacklist_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-int hci_register_sysfs(struct hci_dev *hdev)
+void hci_init_sysfs(struct hci_dev *hdev)
{
struct device *dev = &hdev->dev;
- int err;
-
- BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
dev->type = &bt_host;
dev->class = bt_class;
- dev->parent = hdev->parent;
-
- dev_set_name(dev, "%s", hdev->name);
-
- dev_set_drvdata(dev, hdev);
-
- err = device_register(dev);
- if (err < 0)
- return err;
-
- if (!bt_debugfs)
- return 0;
- hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
- if (!hdev->debugfs)
- return 0;
-
- debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
- hdev, &inquiry_cache_fops);
-
- debugfs_create_file("blacklist", 0444, hdev->debugfs,
- hdev, &blacklist_fops);
-
- return 0;
-}
-
-void hci_unregister_sysfs(struct hci_dev *hdev)
-{
- BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
-
- debugfs_remove_recursive(hdev->debugfs);
-
- device_del(&hdev->dev);
+ __module_get(THIS_MODULE);
+ device_initialize(dev);
}
int __init bt_sysfs_init(void)
{
- bt_debugfs = debugfs_create_dir("bluetooth", NULL);
-
bt_class = class_create(THIS_MODULE, "bluetooth");
- if (IS_ERR(bt_class))
- return PTR_ERR(bt_class);
- return 0;
+ return PTR_ERR_OR_ZERO(bt_class);
}
void bt_sysfs_cleanup(void)
{
class_destroy(bt_class);
-
- debugfs_remove_recursive(bt_debugfs);
}
diff --git a/net/bluetooth/hidp/Kconfig b/net/bluetooth/hidp/Kconfig
index 86a91543172..9332bc7aa85 100644
--- a/net/bluetooth/hidp/Kconfig
+++ b/net/bluetooth/hidp/Kconfig
@@ -1,6 +1,6 @@
config BT_HIDP
tristate "HIDP protocol support"
- depends on BT && BT_L2CAP && INPUT && HID_SUPPORT
+ depends on BT && INPUT
select HID
help
HIDP (Human Interface Device Protocol) is a transport layer
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 29544c21f4b..8181ea4bc2f 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -1,6 +1,7 @@
/*
HIDP implementation for Linux Bluetooth stack (BlueZ).
Copyright (C) 2003-2004 Marcel Holtmann <marcel@holtmann.org>
+ Copyright (C) 2013 David Herrmann <dh.herrmann@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
@@ -20,26 +21,10 @@
SOFTWARE IS DISCLAIMED.
*/
+#include <linux/kref.h>
#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/freezer.h>
-#include <linux/fcntl.h>
-#include <linux/skbuff.h>
-#include <linux/socket.h>
-#include <linux/ioctl.h>
#include <linux/file.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <net/sock.h>
-
-#include <linux/input.h>
-#include <linux/hid.h>
+#include <linux/kthread.h>
#include <linux/hidraw.h>
#include <net/bluetooth/bluetooth.h>
@@ -54,138 +39,133 @@ static DECLARE_RWSEM(hidp_session_sem);
static LIST_HEAD(hidp_session_list);
static unsigned char hidp_keycode[256] = {
- 0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38,
- 50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45, 21, 44, 2, 3,
- 4, 5, 6, 7, 8, 9, 10, 11, 28, 1, 14, 15, 57, 12, 13, 26,
- 27, 43, 43, 39, 40, 41, 51, 52, 53, 58, 59, 60, 61, 62, 63, 64,
- 65, 66, 67, 68, 87, 88, 99, 70,119,110,102,104,111,107,109,106,
- 105,108,103, 69, 98, 55, 74, 78, 96, 79, 80, 81, 75, 76, 77, 71,
- 72, 73, 82, 83, 86,127,116,117,183,184,185,186,187,188,189,190,
- 191,192,193,194,134,138,130,132,128,129,131,137,133,135,136,113,
- 115,114, 0, 0, 0,121, 0, 89, 93,124, 92, 94, 95, 0, 0, 0,
- 122,123, 90, 91, 85, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 29, 42, 56,125, 97, 54,100,126,164,166,165,163,161,115,114,113,
- 150,158,159,128,136,177,178,176,142,152,173,140
+ 0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36,
+ 37, 38, 50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45,
+ 21, 44, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 28, 1,
+ 14, 15, 57, 12, 13, 26, 27, 43, 43, 39, 40, 41, 51, 52,
+ 53, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 87, 88,
+ 99, 70, 119, 110, 102, 104, 111, 107, 109, 106, 105, 108, 103, 69,
+ 98, 55, 74, 78, 96, 79, 80, 81, 75, 76, 77, 71, 72, 73,
+ 82, 83, 86, 127, 116, 117, 183, 184, 185, 186, 187, 188, 189, 190,
+ 191, 192, 193, 194, 134, 138, 130, 132, 128, 129, 131, 137, 133, 135,
+ 136, 113, 115, 114, 0, 0, 0, 121, 0, 89, 93, 124, 92, 94,
+ 95, 0, 0, 0, 122, 123, 90, 91, 85, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 29, 42, 56, 125, 97, 54, 100, 126, 164, 166, 165, 163, 161, 115,
+ 114, 113, 150, 158, 159, 128, 136, 177, 178, 176, 142, 152, 173, 140
};
static unsigned char hidp_mkeyspat[] = { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 };
-static struct hidp_session *__hidp_get_session(bdaddr_t *bdaddr)
-{
- struct hidp_session *session;
- struct list_head *p;
+static int hidp_session_probe(struct l2cap_conn *conn,
+ struct l2cap_user *user);
+static void hidp_session_remove(struct l2cap_conn *conn,
+ struct l2cap_user *user);
+static int hidp_session_thread(void *arg);
+static void hidp_session_terminate(struct hidp_session *s);
- BT_DBG("");
-
- list_for_each(p, &hidp_session_list) {
- session = list_entry(p, struct hidp_session, list);
- if (!bacmp(bdaddr, &session->bdaddr))
- return session;
- }
- return NULL;
-}
-
-static void __hidp_link_session(struct hidp_session *session)
-{
- __module_get(THIS_MODULE);
- list_add(&session->list, &hidp_session_list);
-
- hci_conn_hold_device(session->conn);
-}
-
-static void __hidp_unlink_session(struct hidp_session *session)
-{
- hci_conn_put_device(session->conn);
-
- list_del(&session->list);
- module_put(THIS_MODULE);
-}
-
-static void __hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci)
+static void hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci)
{
memset(ci, 0, sizeof(*ci));
bacpy(&ci->bdaddr, &session->bdaddr);
ci->flags = session->flags;
- ci->state = session->state;
-
- ci->vendor = 0x0000;
- ci->product = 0x0000;
- ci->version = 0x0000;
+ ci->state = BT_CONNECTED;
if (session->input) {
ci->vendor = session->input->id.vendor;
ci->product = session->input->id.product;
ci->version = session->input->id.version;
if (session->input->name)
- strncpy(ci->name, session->input->name, 128);
+ strlcpy(ci->name, session->input->name, 128);
else
- strncpy(ci->name, "HID Boot Device", 128);
- }
-
- if (session->hid) {
+ strlcpy(ci->name, "HID Boot Device", 128);
+ } else if (session->hid) {
ci->vendor = session->hid->vendor;
ci->product = session->hid->product;
ci->version = session->hid->version;
- strncpy(ci->name, session->hid->name, 128);
+ strlcpy(ci->name, session->hid->name, 128);
}
}
-static int hidp_queue_event(struct hidp_session *session, struct input_dev *dev,
- unsigned int type, unsigned int code, int value)
+/* assemble skb, queue message on @transmit and wake up the session thread */
+static int hidp_send_message(struct hidp_session *session, struct socket *sock,
+ struct sk_buff_head *transmit, unsigned char hdr,
+ const unsigned char *data, int size)
{
- unsigned char newleds;
struct sk_buff *skb;
+ struct sock *sk = sock->sk;
- BT_DBG("session %p type %d code %d value %d", session, type, code, value);
-
- if (type != EV_LED)
- return -1;
-
- newleds = (!!test_bit(LED_KANA, dev->led) << 3) |
- (!!test_bit(LED_COMPOSE, dev->led) << 3) |
- (!!test_bit(LED_SCROLLL, dev->led) << 2) |
- (!!test_bit(LED_CAPSL, dev->led) << 1) |
- (!!test_bit(LED_NUML, dev->led));
-
- if (session->leds == newleds)
- return 0;
+ BT_DBG("session %p data %p size %d", session, data, size);
- session->leds = newleds;
+ if (atomic_read(&session->terminate))
+ return -EIO;
- if (!(skb = alloc_skb(3, GFP_ATOMIC))) {
+ skb = alloc_skb(size + 1, GFP_ATOMIC);
+ if (!skb) {
BT_ERR("Can't allocate memory for new frame");
return -ENOMEM;
}
- *skb_put(skb, 1) = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT;
- *skb_put(skb, 1) = 0x01;
- *skb_put(skb, 1) = newleds;
-
- skb_queue_tail(&session->intr_transmit, skb);
+ *skb_put(skb, 1) = hdr;
+ if (data && size > 0)
+ memcpy(skb_put(skb, size), data, size);
- hidp_schedule(session);
+ skb_queue_tail(transmit, skb);
+ wake_up_interruptible(sk_sleep(sk));
return 0;
}
-static int hidp_hidinput_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
+static int hidp_send_ctrl_message(struct hidp_session *session,
+ unsigned char hdr, const unsigned char *data,
+ int size)
{
- struct hid_device *hid = input_get_drvdata(dev);
- struct hidp_session *session = hid->driver_data;
+ return hidp_send_message(session, session->ctrl_sock,
+ &session->ctrl_transmit, hdr, data, size);
+}
- return hidp_queue_event(session, dev, type, code, value);
+static int hidp_send_intr_message(struct hidp_session *session,
+ unsigned char hdr, const unsigned char *data,
+ int size)
+{
+ return hidp_send_message(session, session->intr_sock,
+ &session->intr_transmit, hdr, data, size);
}
-static int hidp_input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
+static int hidp_input_event(struct input_dev *dev, unsigned int type,
+ unsigned int code, int value)
{
struct hidp_session *session = input_get_drvdata(dev);
+ unsigned char newleds;
+ unsigned char hdr, data[2];
+
+ BT_DBG("session %p type %d code %d value %d",
+ session, type, code, value);
+
+ if (type != EV_LED)
+ return -1;
+
+ newleds = (!!test_bit(LED_KANA, dev->led) << 3) |
+ (!!test_bit(LED_COMPOSE, dev->led) << 3) |
+ (!!test_bit(LED_SCROLLL, dev->led) << 2) |
+ (!!test_bit(LED_CAPSL, dev->led) << 1) |
+ (!!test_bit(LED_NUML, dev->led));
- return hidp_queue_event(session, dev, type, code, value);
+ if (session->leds == newleds)
+ return 0;
+
+ session->leds = newleds;
+
+ hdr = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT;
+ data[0] = 0x01;
+ data[1] = newleds;
+
+ return hidp_send_intr_message(session, hdr, data, 2);
}
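/* A minimal sketch of the interrupt-channel frame produced by the LED
 * update above (header value assumes HIDP_TRANS_DATA is 0xa0 and
 * HIDP_DATA_RTYPE_OUPUT is 0x02, matching the literal 0xa2 in the
 * removed hidp_queue_report() below):
 *
 *	byte 0: 0xa2	HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT
 *	byte 1: 0x01	report ID of the keyboard output report
 *	byte 2: LED bitmap (bit 0 NumLock, bit 1 CapsLock, bit 2 ScrollLock)
 */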
static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb)
@@ -243,102 +223,184 @@ static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb)
input_sync(dev);
}
-static int __hidp_send_ctrl_message(struct hidp_session *session,
- unsigned char hdr, unsigned char *data, int size)
+static int hidp_get_raw_report(struct hid_device *hid,
+ unsigned char report_number,
+ unsigned char *data, size_t count,
+ unsigned char report_type)
{
+ struct hidp_session *session = hid->driver_data;
struct sk_buff *skb;
+ size_t len;
+ int numbered_reports = hid->report_enum[report_type].numbered;
+ int ret;
- BT_DBG("session %p data %p size %d", session, data, size);
+ if (atomic_read(&session->terminate))
+ return -EIO;
- if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) {
- BT_ERR("Can't allocate memory for new frame");
- return -ENOMEM;
+ switch (report_type) {
+ case HID_FEATURE_REPORT:
+ report_type = HIDP_TRANS_GET_REPORT | HIDP_DATA_RTYPE_FEATURE;
+ break;
+ case HID_INPUT_REPORT:
+ report_type = HIDP_TRANS_GET_REPORT | HIDP_DATA_RTYPE_INPUT;
+ break;
+ case HID_OUTPUT_REPORT:
+ report_type = HIDP_TRANS_GET_REPORT | HIDP_DATA_RTYPE_OUPUT;
+ break;
+ default:
+ return -EINVAL;
}
- *skb_put(skb, 1) = hdr;
- if (data && size > 0)
- memcpy(skb_put(skb, size), data, size);
+ if (mutex_lock_interruptible(&session->report_mutex))
+ return -ERESTARTSYS;
- skb_queue_tail(&session->ctrl_transmit, skb);
+ /* Set up our wait, and send the report request to the device. */
+ session->waiting_report_type = report_type & HIDP_DATA_RTYPE_MASK;
+ session->waiting_report_number = numbered_reports ? report_number : -1;
+ set_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
+ data[0] = report_number;
+ ret = hidp_send_ctrl_message(session, report_type, data, 1);
+ if (ret)
+ goto err;
- return 0;
-}
+ /* Wait for the return of the report. The returned report
+ gets put in session->report_return. */
+ while (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags) &&
+ !atomic_read(&session->terminate)) {
+ int res;
+
+ res = wait_event_interruptible_timeout(session->report_queue,
+ !test_bit(HIDP_WAITING_FOR_RETURN, &session->flags)
+ || atomic_read(&session->terminate),
+ 5*HZ);
+ if (res == 0) {
+ /* timeout */
+ ret = -EIO;
+ goto err;
+ }
+ if (res < 0) {
+ /* signal */
+ ret = -ERESTARTSYS;
+ goto err;
+ }
+ }
-static inline int hidp_send_ctrl_message(struct hidp_session *session,
- unsigned char hdr, unsigned char *data, int size)
-{
- int err;
+ skb = session->report_return;
+ if (skb) {
+ len = skb->len < count ? skb->len : count;
+ memcpy(data, skb->data, len);
+
+ kfree_skb(skb);
+ session->report_return = NULL;
+ } else {
+ /* Device returned a HANDSHAKE, indicating protocol error. */
+ len = -EIO;
+ }
- err = __hidp_send_ctrl_message(session, hdr, data, size);
+ clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
+ mutex_unlock(&session->report_mutex);
- hidp_schedule(session);
+ return len;
- return err;
+err:
+ clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
+ mutex_unlock(&session->report_mutex);
+ return ret;
}
-static int hidp_queue_report(struct hidp_session *session,
- unsigned char *data, int size)
+static int hidp_set_raw_report(struct hid_device *hid, unsigned char reportnum,
+ unsigned char *data, size_t count,
+ unsigned char report_type)
{
- struct sk_buff *skb;
-
- BT_DBG("session %p hid %p data %p size %d", session, session->hid, data, size);
+ struct hidp_session *session = hid->driver_data;
+ int ret;
- if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) {
- BT_ERR("Can't allocate memory for new frame");
- return -ENOMEM;
+ switch (report_type) {
+ case HID_FEATURE_REPORT:
+ report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE;
+ break;
+ case HID_INPUT_REPORT:
+ report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_INPUT;
+ break;
+ case HID_OUTPUT_REPORT:
+ report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_OUPUT;
+ break;
+ default:
+ return -EINVAL;
}
- *skb_put(skb, 1) = 0xa2;
- if (size > 0)
- memcpy(skb_put(skb, size), data, size);
+ if (mutex_lock_interruptible(&session->report_mutex))
+ return -ERESTARTSYS;
- skb_queue_tail(&session->intr_transmit, skb);
+ /* Set up our wait, and send the report request to the device. */
+ data[0] = reportnum;
+ set_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
+ ret = hidp_send_ctrl_message(session, report_type, data, count);
+ if (ret)
+ goto err;
- hidp_schedule(session);
+ /* Wait for the ACK from the device. */
+ while (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags) &&
+ !atomic_read(&session->terminate)) {
+ int res;
+
+ res = wait_event_interruptible_timeout(session->report_queue,
+ !test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)
+ || atomic_read(&session->terminate),
+ 10*HZ);
+ if (res == 0) {
+ /* timeout */
+ ret = -EIO;
+ goto err;
+ }
+ if (res < 0) {
+ /* signal */
+ ret = -ERESTARTSYS;
+ goto err;
+ }
+ }
- return 0;
-}
+ if (!session->output_report_success) {
+ ret = -EIO;
+ goto err;
+ }
-static int hidp_send_report(struct hidp_session *session, struct hid_report *report)
-{
- unsigned char buf[32];
- int rsize;
+ ret = count;
- rsize = ((report->size - 1) >> 3) + 1 + (report->id > 0);
- if (rsize > sizeof(buf))
- return -EIO;
+err:
+ clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
+ mutex_unlock(&session->report_mutex);
+ return ret;
+}
- hid_output_report(report, buf);
+static int hidp_output_report(struct hid_device *hid, __u8 *data, size_t count)
+{
+ struct hidp_session *session = hid->driver_data;
- return hidp_queue_report(session, buf, rsize);
+ return hidp_send_intr_message(session,
+ HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT,
+ data, count);
}
-static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count,
- unsigned char report_type)
+static int hidp_raw_request(struct hid_device *hid, unsigned char reportnum,
+ __u8 *buf, size_t len, unsigned char rtype,
+ int reqtype)
{
- switch (report_type) {
- case HID_FEATURE_REPORT:
- report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE;
- break;
- case HID_OUTPUT_REPORT:
- report_type = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT;
- break;
+ switch (reqtype) {
+ case HID_REQ_GET_REPORT:
+ return hidp_get_raw_report(hid, reportnum, buf, len, rtype);
+ case HID_REQ_SET_REPORT:
+ return hidp_set_raw_report(hid, reportnum, buf, len, rtype);
default:
- return -EINVAL;
+ return -EIO;
}
-
- if (hidp_send_ctrl_message(hid->driver_data, report_type,
- data, count))
- return -ENOMEM;
- return count;
}
static void hidp_idle_timeout(unsigned long arg)
{
struct hidp_session *session = (struct hidp_session *) arg;
- atomic_inc(&session->terminate);
- hidp_schedule(session);
+ hidp_session_terminate(session);
}
static void hidp_set_timer(struct hidp_session *session)
@@ -347,26 +409,41 @@ static void hidp_set_timer(struct hidp_session *session)
mod_timer(&session->timer, jiffies + HZ * session->idle_to);
}
-static inline void hidp_del_timer(struct hidp_session *session)
+static void hidp_del_timer(struct hidp_session *session)
{
if (session->idle_to > 0)
del_timer(&session->timer);
}
+static void hidp_process_report(struct hidp_session *session,
+ int type, const u8 *data, int len, int intr)
+{
+ if (len > HID_MAX_BUFFER_SIZE)
+ len = HID_MAX_BUFFER_SIZE;
+
+ memcpy(session->input_buf, data, len);
+ hid_input_report(session->hid, type, session->input_buf, len, intr);
+}
+
static void hidp_process_handshake(struct hidp_session *session,
unsigned char param)
{
BT_DBG("session %p param 0x%02x", session, param);
+ session->output_report_success = 0; /* default condition */
switch (param) {
case HIDP_HSHK_SUCCESSFUL:
/* FIXME: Call into SET_ GET_ handlers here */
+ session->output_report_success = 1;
break;
case HIDP_HSHK_NOT_READY:
case HIDP_HSHK_ERR_INVALID_REPORT_ID:
case HIDP_HSHK_ERR_UNSUPPORTED_REQUEST:
case HIDP_HSHK_ERR_INVALID_PARAMETER:
+ if (test_and_clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags))
+ wake_up_interruptible(&session->report_queue);
+
/* FIXME: Call into SET_ GET_ handlers here */
break;
@@ -376,15 +453,19 @@ static void hidp_process_handshake(struct hidp_session *session,
case HIDP_HSHK_ERR_FATAL:
 		/* Device requests a reboot, as this is the only way this error
 		 * can be recovered from. */
- __hidp_send_ctrl_message(session,
+ hidp_send_ctrl_message(session,
HIDP_TRANS_HID_CONTROL | HIDP_CTRL_SOFT_RESET, NULL, 0);
break;
default:
- __hidp_send_ctrl_message(session,
+ hidp_send_ctrl_message(session,
HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_INVALID_PARAMETER, NULL, 0);
break;
}
+
+ /* Wake up the waiting thread. */
+ if (test_and_clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags))
+ wake_up_interruptible(&session->report_queue);
}
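/* The waiter side of these wake-ups, condensed from hidp_set_raw_report()
 * above:
 *
 *	set_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
 *	hidp_send_ctrl_message(session, report_type, data, count);
 *	wait_event_interruptible_timeout(session->report_queue,
 *		!test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)
 *		|| atomic_read(&session->terminate), 10 * HZ);
 */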
static void hidp_process_hid_control(struct hidp_session *session,
@@ -397,15 +478,15 @@ static void hidp_process_hid_control(struct hidp_session *session,
skb_queue_purge(&session->ctrl_transmit);
skb_queue_purge(&session->intr_transmit);
- /* Kill session thread */
- atomic_inc(&session->terminate);
- hidp_schedule(session);
+ hidp_session_terminate(session);
}
}
-static void hidp_process_data(struct hidp_session *session, struct sk_buff *skb,
+/* Returns true if the passed-in skb should be freed by the caller. */
+static int hidp_process_data(struct hidp_session *session, struct sk_buff *skb,
unsigned char param)
{
+ int done_with_skb = 1;
BT_DBG("session %p skb %p len %d param 0x%02x", session, skb, skb->len, param);
switch (param) {
@@ -416,8 +497,8 @@ static void hidp_process_data(struct hidp_session *session, struct sk_buff *skb,
hidp_input_report(session, skb);
if (session->hid)
- hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 0);
-
+ hidp_process_report(session, HID_INPUT_REPORT,
+ skb->data, skb->len, 0);
break;
case HIDP_DATA_RTYPE_OTHER:
@@ -426,15 +507,30 @@ static void hidp_process_data(struct hidp_session *session, struct sk_buff *skb,
break;
default:
- __hidp_send_ctrl_message(session,
+ hidp_send_ctrl_message(session,
HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_INVALID_PARAMETER, NULL, 0);
}
+
+ if (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags) &&
+ param == session->waiting_report_type) {
+ if (session->waiting_report_number < 0 ||
+ session->waiting_report_number == skb->data[0]) {
+ /* hidp_get_raw_report() is waiting on this report. */
+ session->report_return = skb;
+ done_with_skb = 0;
+ clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
+ wake_up_interruptible(&session->report_queue);
+ }
+ }
+
+ return done_with_skb;
}
static void hidp_recv_ctrl_frame(struct hidp_session *session,
struct sk_buff *skb)
{
unsigned char hdr, type, param;
+ int free_skb = 1;
BT_DBG("session %p skb %p len %d", session, skb, skb->len);
@@ -454,16 +550,17 @@ static void hidp_recv_ctrl_frame(struct hidp_session *session,
break;
case HIDP_TRANS_DATA:
- hidp_process_data(session, skb, param);
+ free_skb = hidp_process_data(session, skb, param);
break;
default:
- __hidp_send_ctrl_message(session,
+ hidp_send_ctrl_message(session,
HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_UNSUPPORTED_REQUEST, NULL, 0);
break;
}
- kfree_skb(skb);
+ if (free_skb)
+ kfree_skb(skb);
}
static void hidp_recv_intr_frame(struct hidp_session *session,
@@ -483,7 +580,8 @@ static void hidp_recv_intr_frame(struct hidp_session *session,
hidp_input_report(session, skb);
if (session->hid) {
- hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 1);
+ hidp_process_report(session, HID_INPUT_REPORT,
+ skb->data, skb->len, 1);
BT_DBG("report len %d", skb->len);
}
} else {
@@ -508,25 +606,24 @@ static int hidp_send_frame(struct socket *sock, unsigned char *data, int len)
return kernel_sendmsg(sock, &msg, &iv, 1, len);
}
-static void hidp_process_transmit(struct hidp_session *session)
+/* dequeue message from @transmit and send via @sock */
+static void hidp_process_transmit(struct hidp_session *session,
+ struct sk_buff_head *transmit,
+ struct socket *sock)
{
struct sk_buff *skb;
+ int ret;
BT_DBG("session %p", session);
- while ((skb = skb_dequeue(&session->ctrl_transmit))) {
- if (hidp_send_frame(session->ctrl_sock, skb->data, skb->len) < 0) {
- skb_queue_head(&session->ctrl_transmit, skb);
+ while ((skb = skb_dequeue(transmit))) {
+ ret = hidp_send_frame(sock, skb->data, skb->len);
+ if (ret == -EAGAIN) {
+ skb_queue_head(transmit, skb);
break;
- }
-
- hidp_set_timer(session);
- kfree_skb(skb);
- }
-
- while ((skb = skb_dequeue(&session->intr_transmit))) {
- if (hidp_send_frame(session->intr_sock, skb->data, skb->len) < 0) {
- skb_queue_head(&session->intr_transmit, skb);
+ } else if (ret < 0) {
+ hidp_session_terminate(session);
+ kfree_skb(skb);
break;
}
@@ -535,118 +632,11 @@ static void hidp_process_transmit(struct hidp_session *session)
}
}
-static int hidp_session(void *arg)
-{
- struct hidp_session *session = arg;
- struct sock *ctrl_sk = session->ctrl_sock->sk;
- struct sock *intr_sk = session->intr_sock->sk;
- struct sk_buff *skb;
- int vendor = 0x0000, product = 0x0000;
- wait_queue_t ctrl_wait, intr_wait;
-
- BT_DBG("session %p", session);
-
- if (session->input) {
- vendor = session->input->id.vendor;
- product = session->input->id.product;
- }
-
- if (session->hid) {
- vendor = session->hid->vendor;
- product = session->hid->product;
- }
-
- daemonize("khidpd_%04x%04x", vendor, product);
- set_user_nice(current, -15);
-
- init_waitqueue_entry(&ctrl_wait, current);
- init_waitqueue_entry(&intr_wait, current);
- add_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
- add_wait_queue(sk_sleep(intr_sk), &intr_wait);
- while (!atomic_read(&session->terminate)) {
- set_current_state(TASK_INTERRUPTIBLE);
-
- if (ctrl_sk->sk_state != BT_CONNECTED || intr_sk->sk_state != BT_CONNECTED)
- break;
-
- while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) {
- skb_orphan(skb);
- hidp_recv_ctrl_frame(session, skb);
- }
-
- while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) {
- skb_orphan(skb);
- hidp_recv_intr_frame(session, skb);
- }
-
- hidp_process_transmit(session);
-
- schedule();
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
- remove_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
-
- down_write(&hidp_session_sem);
-
- hidp_del_timer(session);
-
- if (session->input) {
- input_unregister_device(session->input);
- session->input = NULL;
- }
-
- if (session->hid) {
- hid_destroy_device(session->hid);
- session->hid = NULL;
- }
-
- /* Wakeup user-space polling for socket errors */
- session->intr_sock->sk->sk_err = EUNATCH;
- session->ctrl_sock->sk->sk_err = EUNATCH;
-
- hidp_schedule(session);
-
- fput(session->intr_sock->file);
-
- wait_event_timeout(*(sk_sleep(ctrl_sk)),
- (ctrl_sk->sk_state == BT_CLOSED), msecs_to_jiffies(500));
-
- fput(session->ctrl_sock->file);
-
- __hidp_unlink_session(session);
-
- up_write(&hidp_session_sem);
-
- kfree(session);
- return 0;
-}
-
-static struct device *hidp_get_device(struct hidp_session *session)
-{
- bdaddr_t *src = &bt_sk(session->ctrl_sock->sk)->src;
- bdaddr_t *dst = &bt_sk(session->ctrl_sock->sk)->dst;
- struct device *device = NULL;
- struct hci_dev *hdev;
-
- hdev = hci_get_route(dst, src);
- if (!hdev)
- return NULL;
-
- session->conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
- if (session->conn)
- device = &session->conn->dev;
-
- hci_dev_put(hdev);
-
- return device;
-}
-
static int hidp_setup_input(struct hidp_session *session,
struct hidp_connadd_req *req)
{
struct input_dev *input;
- int err, i;
+ int i;
input = input_allocate_device();
if (!input)
@@ -689,16 +679,10 @@ static int hidp_setup_input(struct hidp_session *session,
input->relbit[0] |= BIT_MASK(REL_WHEEL);
}
- input->dev.parent = hidp_get_device(session);
+ input->dev.parent = &session->conn->hcon->dev;
input->event = hidp_input_event;
- err = input_register_device(input);
- if (err < 0) {
- hci_conn_put_device(session->conn);
- return err;
- }
-
return 0;
}
@@ -721,17 +705,6 @@ static int hidp_parse(struct hid_device *hid)
static int hidp_start(struct hid_device *hid)
{
- struct hidp_session *session = hid->driver_data;
- struct hid_report *report;
-
- list_for_each_entry(report, &hid->report_enum[HID_INPUT_REPORT].
- report_list, list)
- hidp_send_report(session, report);
-
- list_for_each_entry(report, &hid->report_enum[HID_FEATURE_REPORT].
- report_list, list)
- hidp_send_report(session, report);
-
return 0;
}
@@ -751,9 +724,12 @@ static struct hid_ll_driver hidp_hid_driver = {
.stop = hidp_stop,
.open = hidp_open,
.close = hidp_close,
- .hidinput_input_event = hidp_hidinput_event,
+ .raw_request = hidp_raw_request,
+ .output_report = hidp_output_report,
};
+/* This function sets up the hid device. It does not add it
+   to the HID system; that is done in hidp_session_dev_add(). */
static int hidp_setup_hid(struct hidp_session *session,
struct hidp_connadd_req *req)
{
@@ -786,24 +762,28 @@ static int hidp_setup_hid(struct hidp_session *session,
hid->version = req->version;
hid->country = req->country;
- strncpy(hid->name, req->name, 128);
- strncpy(hid->phys, batostr(&bt_sk(session->ctrl_sock->sk)->src), 64);
- strncpy(hid->uniq, batostr(&bt_sk(session->ctrl_sock->sk)->dst), 64);
+ strncpy(hid->name, req->name, sizeof(req->name) - 1);
- hid->dev.parent = hidp_get_device(session);
- hid->ll_driver = &hidp_hid_driver;
+ snprintf(hid->phys, sizeof(hid->phys), "%pMR",
+ &l2cap_pi(session->ctrl_sock->sk)->chan->src);
- hid->hid_output_raw_report = hidp_output_raw_report;
+ /* NOTE: Some device modules depend on the dst address being stored in
+ * uniq. Please be aware of this before making changes to this behavior.
+ */
+ snprintf(hid->uniq, sizeof(hid->uniq), "%pMR",
+ &l2cap_pi(session->ctrl_sock->sk)->chan->dst);
- err = hid_add_device(hid);
- if (err < 0)
- goto failed;
+ hid->dev.parent = &session->conn->hcon->dev;
+ hid->ll_driver = &hidp_hid_driver;
- return 0;
+ /* True if device is blacklisted in drivers/hid/hid-core.c */
+ if (hid_ignore(hid)) {
+ hid_destroy_device(session->hid);
+ session->hid = NULL;
+ return -ENODEV;
+ }
-failed:
- hid_destroy_device(hid);
- session->hid = NULL;
+ return 0;
fault:
kfree(session->rd_data);
@@ -812,162 +792,602 @@ fault:
return err;
}
-int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock)
+/* initialize session devices */
+static int hidp_session_dev_init(struct hidp_session *session,
+ struct hidp_connadd_req *req)
{
- struct hidp_session *session, *s;
- int err;
+ int ret;
- BT_DBG("");
+ if (req->rd_size > 0) {
+ ret = hidp_setup_hid(session, req);
+ if (ret && ret != -ENODEV)
+ return ret;
+ }
- if (bacmp(&bt_sk(ctrl_sock->sk)->src, &bt_sk(intr_sock->sk)->src) ||
- bacmp(&bt_sk(ctrl_sock->sk)->dst, &bt_sk(intr_sock->sk)->dst))
- return -ENOTUNIQ;
+ if (!session->hid) {
+ ret = hidp_setup_input(session, req);
+ if (ret < 0)
+ return ret;
+ }
- session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL);
- if (!session)
- return -ENOMEM;
+ return 0;
+}
- BT_DBG("rd_data %p rd_size %d", req->rd_data, req->rd_size);
+/* destroy session devices */
+static void hidp_session_dev_destroy(struct hidp_session *session)
+{
+ if (session->hid)
+ put_device(&session->hid->dev);
+ else if (session->input)
+ input_put_device(session->input);
- down_write(&hidp_session_sem);
+ kfree(session->rd_data);
+ session->rd_data = NULL;
+}
- s = __hidp_get_session(&bt_sk(ctrl_sock->sk)->dst);
- if (s && s->state == BT_CONNECTED) {
- err = -EEXIST;
- goto failed;
+/* add HID/input devices to their underlying bus systems */
+static int hidp_session_dev_add(struct hidp_session *session)
+{
+ int ret;
+
+ /* Both HID and input systems drop a ref-count when unregistering the
+ * device but they don't take a ref-count when registering them. Work
+ * around this by explicitly taking a refcount during registration
+ * which is dropped automatically by unregistering the devices. */
+
+ if (session->hid) {
+ ret = hid_add_device(session->hid);
+ if (ret)
+ return ret;
+ get_device(&session->hid->dev);
+ } else if (session->input) {
+ ret = input_register_device(session->input);
+ if (ret)
+ return ret;
+ input_get_device(session->input);
}
- bacpy(&session->bdaddr, &bt_sk(ctrl_sock->sk)->dst);
+ return 0;
+}
- session->ctrl_mtu = min_t(uint, l2cap_pi(ctrl_sock->sk)->omtu, l2cap_pi(ctrl_sock->sk)->imtu);
- session->intr_mtu = min_t(uint, l2cap_pi(intr_sock->sk)->omtu, l2cap_pi(intr_sock->sk)->imtu);
+/* remove HID/input devices from their bus systems */
+static void hidp_session_dev_del(struct hidp_session *session)
+{
+ if (session->hid)
+ hid_destroy_device(session->hid);
+ else if (session->input)
+ input_unregister_device(session->input);
+}
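/* How the four helpers above pair up across a session's lifetime (a
 * summary of the code above, not new behaviour):
 *
 *	hidp_session_dev_init()    allocate the hid/input device
 *	hidp_session_dev_add()     register it, take the extra ref
 *	hidp_session_dev_del()     unregister it (drops the extra ref)
 *	hidp_session_dev_destroy() drop the allocation ref, free rd_data
 */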
- BT_DBG("ctrl mtu %d intr mtu %d", session->ctrl_mtu, session->intr_mtu);
+/*
+ * Asynchronous device registration
+ * HID device drivers might want to perform I/O during initialization to
+ * detect device types. Therefore, call device registration in a separate
+ * worker so the HIDP thread can schedule I/O operations.
+ * Note that this must be called only after the session thread has been
+ * initialized successfully. On success it adds the devices and advances the
+ * session state, otherwise it terminates the session thread.
+ */
+static void hidp_session_dev_work(struct work_struct *work)
+{
+ struct hidp_session *session = container_of(work,
+ struct hidp_session,
+ dev_init);
+ int ret;
- session->ctrl_sock = ctrl_sock;
- session->intr_sock = intr_sock;
- session->state = BT_CONNECTED;
+ ret = hidp_session_dev_add(session);
+ if (!ret)
+ atomic_inc(&session->state);
+ else
+ hidp_session_terminate(session);
+}
+
+/*
+ * Create new session object
+ * Allocate session object, initialize static fields, copy input data into the
+ * object and take a reference to all sub-objects.
+ * This returns 0 on success and puts a pointer to the new session object in
+ * \out. Otherwise, an error code is returned.
+ * The new session object has an initial ref-count of 1.
+ */
+static int hidp_session_new(struct hidp_session **out, const bdaddr_t *bdaddr,
+ struct socket *ctrl_sock,
+ struct socket *intr_sock,
+ struct hidp_connadd_req *req,
+ struct l2cap_conn *conn)
+{
+ struct hidp_session *session;
+ int ret;
+ struct bt_sock *ctrl, *intr;
- setup_timer(&session->timer, hidp_idle_timeout, (unsigned long)session);
+ ctrl = bt_sk(ctrl_sock->sk);
+ intr = bt_sk(intr_sock->sk);
+
+ session = kzalloc(sizeof(*session), GFP_KERNEL);
+ if (!session)
+ return -ENOMEM;
+ /* object and runtime management */
+ kref_init(&session->ref);
+ atomic_set(&session->state, HIDP_SESSION_IDLING);
+ init_waitqueue_head(&session->state_queue);
+ session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID);
+
+ /* connection management */
+ bacpy(&session->bdaddr, bdaddr);
+ session->conn = conn;
+ session->user.probe = hidp_session_probe;
+ session->user.remove = hidp_session_remove;
+ session->ctrl_sock = ctrl_sock;
+ session->intr_sock = intr_sock;
skb_queue_head_init(&session->ctrl_transmit);
skb_queue_head_init(&session->intr_transmit);
-
- session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID);
+ session->ctrl_mtu = min_t(uint, l2cap_pi(ctrl)->chan->omtu,
+ l2cap_pi(ctrl)->chan->imtu);
+ session->intr_mtu = min_t(uint, l2cap_pi(intr)->chan->omtu,
+ l2cap_pi(intr)->chan->imtu);
session->idle_to = req->idle_to;
- if (req->rd_size > 0) {
- err = hidp_setup_hid(session, req);
- if (err && err != -ENODEV)
- goto purge;
- }
+ /* device management */
+ INIT_WORK(&session->dev_init, hidp_session_dev_work);
+ setup_timer(&session->timer, hidp_idle_timeout,
+ (unsigned long)session);
- if (!session->hid) {
- err = hidp_setup_input(session, req);
- if (err < 0)
- goto purge;
+ /* session data */
+ mutex_init(&session->report_mutex);
+ init_waitqueue_head(&session->report_queue);
+
+ ret = hidp_session_dev_init(session, req);
+ if (ret)
+ goto err_free;
+
+ l2cap_conn_get(session->conn);
+ get_file(session->intr_sock->file);
+ get_file(session->ctrl_sock->file);
+ *out = session;
+ return 0;
+
+err_free:
+ kfree(session);
+ return ret;
+}
+
+/* increase ref-count of the given session by one */
+static void hidp_session_get(struct hidp_session *session)
+{
+ kref_get(&session->ref);
+}
+
+/* release callback */
+static void session_free(struct kref *ref)
+{
+ struct hidp_session *session = container_of(ref, struct hidp_session,
+ ref);
+
+ hidp_session_dev_destroy(session);
+ skb_queue_purge(&session->ctrl_transmit);
+ skb_queue_purge(&session->intr_transmit);
+ fput(session->intr_sock->file);
+ fput(session->ctrl_sock->file);
+ l2cap_conn_put(session->conn);
+ kfree(session);
+}
+
+/* decrease ref-count of the given session by one */
+static void hidp_session_put(struct hidp_session *session)
+{
+ kref_put(&session->ref, session_free);
+}
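/* Session ref-count ownership at a glance (a summary of the flow
 * implemented below; hidp_session_new() hands out an initial ref of 1):
 *
 *	hidp_connection_add()  owns the initial ref, drops it when done
 *	hidp_session_probe()   takes one ref for hidp_session_list
 *	hidp_session_thread()  takes one ref for the running thread
 *	hidp_session_remove()  drops the list ref
 *	thread exit            drops the thread ref; the last put frees
 */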
+
+/*
+ * Search the list of active sessions for a session with target address
+ * \bdaddr. You must hold at least a read-lock on \hidp_session_sem. As long as
+ * you do not release this lock, the session objects cannot vanish and you can
+ * safely take a reference to the session yourself.
+ */
+static struct hidp_session *__hidp_session_find(const bdaddr_t *bdaddr)
+{
+ struct hidp_session *session;
+
+ list_for_each_entry(session, &hidp_session_list, list) {
+ if (!bacmp(bdaddr, &session->bdaddr))
+ return session;
}
- __hidp_link_session(session);
+ return NULL;
+}
- hidp_set_timer(session);
+/*
+ * Same as __hidp_session_find(), except the caller must not hold
+ * hidp_session_sem; it is taken internally. This also takes a reference to
+ * the returned session (if non-NULL), so you must drop that reference once
+ * you no longer use the object.
+ */
+static struct hidp_session *hidp_session_find(const bdaddr_t *bdaddr)
+{
+ struct hidp_session *session;
+
+ down_read(&hidp_session_sem);
- err = kernel_thread(hidp_session, session, CLONE_KERNEL);
- if (err < 0)
- goto unlink;
+ session = __hidp_session_find(bdaddr);
+ if (session)
+ hidp_session_get(session);
- if (session->input) {
- hidp_send_ctrl_message(session,
- HIDP_TRANS_SET_PROTOCOL | HIDP_PROTO_BOOT, NULL, 0);
- session->flags |= (1 << HIDP_BOOT_PROTOCOL_MODE);
+ up_read(&hidp_session_sem);
+
+ return session;
+}
+
+/*
+ * Start session synchronously
+ * This starts a session thread and waits until initialization
+ * is done or returns an error if it couldn't be started.
+ * If this returns 0 the session thread is up and running. You must call
+ * hidp_session_stop_sync() before deleting any runtime resources.
+ */
+static int hidp_session_start_sync(struct hidp_session *session)
+{
+ unsigned int vendor, product;
- session->leds = 0xff;
- hidp_input_event(session->input, EV_LED, 0, 0);
+ if (session->hid) {
+ vendor = session->hid->vendor;
+ product = session->hid->product;
+ } else if (session->input) {
+ vendor = session->input->id.vendor;
+ product = session->input->id.product;
+ } else {
+ vendor = 0x0000;
+ product = 0x0000;
}
- up_write(&hidp_session_sem);
+ session->task = kthread_run(hidp_session_thread, session,
+ "khidpd_%04x%04x", vendor, product);
+ if (IS_ERR(session->task))
+ return PTR_ERR(session->task);
+
+ while (atomic_read(&session->state) <= HIDP_SESSION_IDLING)
+ wait_event(session->state_queue,
+ atomic_read(&session->state) > HIDP_SESSION_IDLING);
+
return 0;
+}
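/* The thread-side counterpart that releases the waiter above is in
 * hidp_session_thread() further down:
 *
 *	atomic_inc(&session->state);     // IDLING -> PREPARING
 *	wake_up(&session->state_queue);
 */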
-unlink:
- hidp_del_timer(session);
+/*
+ * Terminate session thread
+ * Wake up session thread and notify it to stop. This is asynchronous and
+ * returns immediately. Call this whenever a runtime error occurs and you want
+ * the session to stop.
+ * Note: wake_up_process() performs any necessary memory-barriers for us.
+ */
+static void hidp_session_terminate(struct hidp_session *session)
+{
+ atomic_inc(&session->terminate);
+ wake_up_process(session->task);
+}
+
+/*
+ * Probe HIDP session
+ * This is called from the l2cap_conn core when our l2cap_user object is bound
+ * to the hci-connection. We get the session via the \user object and can now
+ * start the session thread, link it into the global session list and
+ * schedule HID/input device registration.
+ * The global session-list owns its own reference to the session object so you
+ * can drop your own reference after registering the l2cap_user object.
+ */
+static int hidp_session_probe(struct l2cap_conn *conn,
+ struct l2cap_user *user)
+{
+ struct hidp_session *session = container_of(user,
+ struct hidp_session,
+ user);
+ struct hidp_session *s;
+ int ret;
- __hidp_unlink_session(session);
+ down_write(&hidp_session_sem);
- if (session->input) {
- input_unregister_device(session->input);
- session->input = NULL;
+ /* check that no other session for this device exists */
+ s = __hidp_session_find(&session->bdaddr);
+ if (s) {
+ ret = -EEXIST;
+ goto out_unlock;
}
- if (session->hid) {
- hid_destroy_device(session->hid);
- session->hid = NULL;
+ if (session->input) {
+ ret = hidp_session_dev_add(session);
+ if (ret)
+ goto out_unlock;
}
- kfree(session->rd_data);
- session->rd_data = NULL;
+ ret = hidp_session_start_sync(session);
+ if (ret)
+ goto out_del;
-purge:
- skb_queue_purge(&session->ctrl_transmit);
- skb_queue_purge(&session->intr_transmit);
+ /* HID device registration is async to allow I/O during probe */
+ if (session->input)
+ atomic_inc(&session->state);
+ else
+ schedule_work(&session->dev_init);
+
+ hidp_session_get(session);
+ list_add(&session->list, &hidp_session_list);
+ ret = 0;
+ goto out_unlock;
-failed:
+out_del:
+ if (session->input)
+ hidp_session_dev_del(session);
+out_unlock:
up_write(&hidp_session_sem);
+ return ret;
+}
- input_free_device(session->input);
- kfree(session);
- return err;
+/*
+ * Remove HIDP session
+ * Called from the l2cap_conn core when either we explicitly unregistered
+ * the l2cap_user object or if the underlying connection is shut down.
+ * We signal the hidp-session thread to shut down, unregister the HID/input
+ * devices and unlink the session from the global list.
+ * This drops the reference to the session that is owned by the global
+ * session-list.
+ * Note: We _must_ not synchronously wait for the session-thread to shut down.
+ * This is because the session-thread might be waiting for an HCI lock that is
+ * held while we are called. Therefore, we only unregister the devices and
+ * notify the session-thread to terminate. The thread itself owns a reference
+ * to the session object so it can safely shut down.
+ */
+static void hidp_session_remove(struct l2cap_conn *conn,
+ struct l2cap_user *user)
+{
+ struct hidp_session *session = container_of(user,
+ struct hidp_session,
+ user);
+
+ down_write(&hidp_session_sem);
+
+ hidp_session_terminate(session);
+
+ cancel_work_sync(&session->dev_init);
+ if (session->input ||
+ atomic_read(&session->state) > HIDP_SESSION_PREPARING)
+ hidp_session_dev_del(session);
+
+ list_del(&session->list);
+
+ up_write(&hidp_session_sem);
+
+ hidp_session_put(session);
+}
+
+/*
+ * Session Worker
+ * This performs the actual main-loop of the HIDP worker. We first check
+ * whether the underlying connection is still alive, then parse all pending
+ * messages and finally send all outstanding messages.
+ */
+static void hidp_session_run(struct hidp_session *session)
+{
+ struct sock *ctrl_sk = session->ctrl_sock->sk;
+ struct sock *intr_sk = session->intr_sock->sk;
+ struct sk_buff *skb;
+
+ for (;;) {
+ /*
+	 * This thread can be woken up in two ways:
+ * - You call hidp_session_terminate() which sets the
+ * session->terminate flag and wakes this thread up.
+ * - Via modifying the socket state of ctrl/intr_sock. This
+ * thread is woken up by ->sk_state_changed().
+ *
+ * Note: set_current_state() performs any necessary
+ * memory-barriers for us.
+ */
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (atomic_read(&session->terminate))
+ break;
+
+ if (ctrl_sk->sk_state != BT_CONNECTED ||
+ intr_sk->sk_state != BT_CONNECTED)
+ break;
+
+ /* parse incoming intr-skbs */
+ while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) {
+ skb_orphan(skb);
+ if (!skb_linearize(skb))
+ hidp_recv_intr_frame(session, skb);
+ else
+ kfree_skb(skb);
+ }
+
+ /* send pending intr-skbs */
+ hidp_process_transmit(session, &session->intr_transmit,
+ session->intr_sock);
+
+ /* parse incoming ctrl-skbs */
+ while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) {
+ skb_orphan(skb);
+ if (!skb_linearize(skb))
+ hidp_recv_ctrl_frame(session, skb);
+ else
+ kfree_skb(skb);
+ }
+
+ /* send pending ctrl-skbs */
+ hidp_process_transmit(session, &session->ctrl_transmit,
+ session->ctrl_sock);
+
+ schedule();
+ }
+
+ atomic_inc(&session->terminate);
+ set_current_state(TASK_RUNNING);
+}
+
+/*
+ * HIDP session thread
+ * This thread runs the I/O for a single HIDP session. Startup is synchronous
+ * which allows us to take references to ourself here instead of doing that in
+ * the caller.
+ * When we are ready to run we notify the caller and call hidp_session_run().
+ */
+static int hidp_session_thread(void *arg)
+{
+ struct hidp_session *session = arg;
+ wait_queue_t ctrl_wait, intr_wait;
+
+ BT_DBG("session %p", session);
+
+ /* initialize runtime environment */
+ hidp_session_get(session);
+ __module_get(THIS_MODULE);
+ set_user_nice(current, -15);
+ hidp_set_timer(session);
+
+ init_waitqueue_entry(&ctrl_wait, current);
+ init_waitqueue_entry(&intr_wait, current);
+ add_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait);
+ add_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait);
+ /* This memory barrier is paired with wq_has_sleeper(). See
+	 * sock_poll_wait() for more information on why this is needed. */
+ smp_mb();
+
+ /* notify synchronous startup that we're ready */
+ atomic_inc(&session->state);
+ wake_up(&session->state_queue);
+
+ /* run session */
+ hidp_session_run(session);
+
+ /* cleanup runtime environment */
+ remove_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait);
+	remove_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait);
+ wake_up_interruptible(&session->report_queue);
+ hidp_del_timer(session);
+
+ /*
+ * If we stopped ourself due to any internal signal, we should try to
+ * unregister our own session here to avoid having it linger until the
+ * parent l2cap_conn dies or user-space cleans it up.
+ * This does not deadlock as we don't do any synchronous shutdown.
+ * Instead, this call has the same semantics as if user-space tried to
+ * delete the session.
+ */
+ l2cap_unregister_user(session->conn, &session->user);
+ hidp_session_put(session);
+
+ module_put_and_exit(0);
+ return 0;
}
-int hidp_del_connection(struct hidp_conndel_req *req)
+static int hidp_verify_sockets(struct socket *ctrl_sock,
+ struct socket *intr_sock)
{
+ struct l2cap_chan *ctrl_chan, *intr_chan;
+ struct bt_sock *ctrl, *intr;
struct hidp_session *session;
- int err = 0;
- BT_DBG("");
+ if (!l2cap_is_socket(ctrl_sock) || !l2cap_is_socket(intr_sock))
+ return -EINVAL;
- down_read(&hidp_session_sem);
+ ctrl_chan = l2cap_pi(ctrl_sock->sk)->chan;
+ intr_chan = l2cap_pi(intr_sock->sk)->chan;
+
+ if (bacmp(&ctrl_chan->src, &intr_chan->src) ||
+ bacmp(&ctrl_chan->dst, &intr_chan->dst))
+ return -ENOTUNIQ;
+
+ ctrl = bt_sk(ctrl_sock->sk);
+ intr = bt_sk(intr_sock->sk);
- session = __hidp_get_session(&req->bdaddr);
+ if (ctrl->sk.sk_state != BT_CONNECTED ||
+ intr->sk.sk_state != BT_CONNECTED)
+ return -EBADFD;
+
+ /* early session check, we check again during session registration */
+ session = hidp_session_find(&ctrl_chan->dst);
if (session) {
- if (req->flags & (1 << HIDP_VIRTUAL_CABLE_UNPLUG)) {
- hidp_send_ctrl_message(session,
- HIDP_TRANS_HID_CONTROL | HIDP_CTRL_VIRTUAL_CABLE_UNPLUG, NULL, 0);
- } else {
- /* Flush the transmit queues */
- skb_queue_purge(&session->ctrl_transmit);
- skb_queue_purge(&session->intr_transmit);
-
- /* Wakeup user-space polling for socket errors */
- session->intr_sock->sk->sk_err = EUNATCH;
- session->ctrl_sock->sk->sk_err = EUNATCH;
-
- /* Kill session thread */
- atomic_inc(&session->terminate);
- hidp_schedule(session);
- }
- } else
- err = -ENOENT;
+ hidp_session_put(session);
+ return -EEXIST;
+ }
- up_read(&hidp_session_sem);
- return err;
+ return 0;
+}
+
+int hidp_connection_add(struct hidp_connadd_req *req,
+ struct socket *ctrl_sock,
+ struct socket *intr_sock)
+{
+ struct hidp_session *session;
+ struct l2cap_conn *conn;
+ struct l2cap_chan *chan = l2cap_pi(ctrl_sock->sk)->chan;
+ int ret;
+
+ ret = hidp_verify_sockets(ctrl_sock, intr_sock);
+ if (ret)
+ return ret;
+
+ conn = NULL;
+ l2cap_chan_lock(chan);
+ if (chan->conn) {
+ l2cap_conn_get(chan->conn);
+ conn = chan->conn;
+ }
+ l2cap_chan_unlock(chan);
+
+ if (!conn)
+ return -EBADFD;
+
+ ret = hidp_session_new(&session, &chan->dst, ctrl_sock,
+ intr_sock, req, conn);
+ if (ret)
+ goto out_conn;
+
+ ret = l2cap_register_user(conn, &session->user);
+ if (ret)
+ goto out_session;
+
+ ret = 0;
+
+out_session:
+ hidp_session_put(session);
+out_conn:
+ l2cap_conn_put(conn);
+ return ret;
+}
+
+int hidp_connection_del(struct hidp_conndel_req *req)
+{
+ struct hidp_session *session;
+
+ session = hidp_session_find(&req->bdaddr);
+ if (!session)
+ return -ENOENT;
+
+ if (req->flags & (1 << HIDP_VIRTUAL_CABLE_UNPLUG))
+ hidp_send_ctrl_message(session,
+ HIDP_TRANS_HID_CONTROL |
+ HIDP_CTRL_VIRTUAL_CABLE_UNPLUG,
+ NULL, 0);
+ else
+ l2cap_unregister_user(session->conn, &session->user);
+
+ hidp_session_put(session);
+
+ return 0;
}
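/* A hypothetical user-space sketch of driving these entry points through
 * the HIDP control socket; the fd names are made up, and PSM 0x11/0x13 for
 * the control/interrupt L2CAP channels follow the HID profile:
 *
 *	struct hidp_connadd_req req = {
 *		.ctrl_sock = ctrl_fd,	// connected L2CAP socket, PSM 0x11
 *		.intr_sock = intr_fd,	// connected L2CAP socket, PSM 0x13
 *		.idle_to   = 30 * 60,	// idle timeout in seconds
 *	};
 *	err = ioctl(hidp_fd, HIDPCONNADD, &req);
 */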
int hidp_get_connlist(struct hidp_connlist_req *req)
{
- struct list_head *p;
+ struct hidp_session *session;
int err = 0, n = 0;
BT_DBG("");
down_read(&hidp_session_sem);
- list_for_each(p, &hidp_session_list) {
- struct hidp_session *session;
+ list_for_each_entry(session, &hidp_session_list, list) {
struct hidp_conninfo ci;
- session = list_entry(p, struct hidp_session, list);
-
- __hidp_copy_session(session, &ci);
+ hidp_copy_session(session, &ci);
if (copy_to_user(req->ci, &ci, sizeof(ci))) {
err = -EFAULT;
@@ -988,63 +1408,33 @@ int hidp_get_connlist(struct hidp_connlist_req *req)
int hidp_get_conninfo(struct hidp_conninfo *ci)
{
struct hidp_session *session;
- int err = 0;
-
- down_read(&hidp_session_sem);
- session = __hidp_get_session(&ci->bdaddr);
- if (session)
- __hidp_copy_session(session, ci);
- else
- err = -ENOENT;
+ session = hidp_session_find(&ci->bdaddr);
+ if (session) {
+ hidp_copy_session(session, ci);
+ hidp_session_put(session);
+ }
- up_read(&hidp_session_sem);
- return err;
+ return session ? 0 : -ENOENT;
}
-static const struct hid_device_id hidp_table[] = {
- { HID_BLUETOOTH_DEVICE(HID_ANY_ID, HID_ANY_ID) },
- { }
-};
-
-static struct hid_driver hidp_driver = {
- .name = "generic-bluetooth",
- .id_table = hidp_table,
-};
-
static int __init hidp_init(void)
{
- int ret;
-
- l2cap_load();
-
BT_INFO("HIDP (Human Interface Emulation) ver %s", VERSION);
- ret = hid_register_driver(&hidp_driver);
- if (ret)
- goto err;
-
- ret = hidp_init_sockets();
- if (ret)
- goto err_drv;
-
- return 0;
-err_drv:
- hid_unregister_driver(&hidp_driver);
-err:
- return ret;
+ return hidp_init_sockets();
}
static void __exit hidp_exit(void)
{
hidp_cleanup_sockets();
- hid_unregister_driver(&hidp_driver);
}
module_init(hidp_init);
module_exit(hidp_exit);
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
+MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
MODULE_DESCRIPTION("Bluetooth HIDP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index 8d934a19da0..8798492a6e9 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -24,7 +24,10 @@
#define __HIDP_H
#include <linux/types.h>
+#include <linux/hid.h>
+#include <linux/kref.h>
#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/l2cap.h>
/* HIDP header masks */
#define HIDP_HEADER_TRANS_MASK 0xf0
@@ -80,10 +83,12 @@
#define HIDP_VIRTUAL_CABLE_UNPLUG 0
#define HIDP_BOOT_PROTOCOL_MODE 1
#define HIDP_BLUETOOTH_VENDOR_ID 9
+#define HIDP_WAITING_FOR_RETURN 10
+#define HIDP_WAITING_FOR_SEND_ACK 11
struct hidp_connadd_req {
- int ctrl_sock; // Connected control socket
- int intr_sock; // Connteted interrupt socket
+ int ctrl_sock; /* Connected control socket */
+ int intr_sock; /* Connected interrupt socket */
__u16 parser;
__u16 rd_size;
__u8 __user *rd_data;
@@ -117,59 +122,71 @@ struct hidp_connlist_req {
struct hidp_conninfo __user *ci;
};
-int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock);
-int hidp_del_connection(struct hidp_conndel_req *req);
+int hidp_connection_add(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock);
+int hidp_connection_del(struct hidp_conndel_req *req);
int hidp_get_connlist(struct hidp_connlist_req *req);
int hidp_get_conninfo(struct hidp_conninfo *ci);
+enum hidp_session_state {
+ HIDP_SESSION_IDLING,
+ HIDP_SESSION_PREPARING,
+ HIDP_SESSION_RUNNING,
+};
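/* State progression sketch, matching the atomic_inc() calls in core.c:
 *
 *	IDLING     session allocated, thread not yet ready
 *	PREPARING  session thread is up, device registration still pending
 *	RUNNING    devices registered (hidp_session_dev_work() or probe)
 */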
+
/* HIDP session defines */
struct hidp_session {
struct list_head list;
+ struct kref ref;
- struct hci_conn *conn;
+ /* runtime management */
+ atomic_t state;
+ wait_queue_head_t state_queue;
+ atomic_t terminate;
+ struct task_struct *task;
+ unsigned long flags;
+ /* connection management */
+ bdaddr_t bdaddr;
+ struct l2cap_conn *conn;
+ struct l2cap_user user;
struct socket *ctrl_sock;
struct socket *intr_sock;
-
- bdaddr_t bdaddr;
-
- unsigned long state;
- unsigned long flags;
- unsigned long idle_to;
-
+ struct sk_buff_head ctrl_transmit;
+ struct sk_buff_head intr_transmit;
uint ctrl_mtu;
uint intr_mtu;
+ unsigned long idle_to;
- atomic_t terminate;
-
- unsigned char keys[8];
- unsigned char leds;
-
+ /* device management */
+ struct work_struct dev_init;
struct input_dev *input;
-
struct hid_device *hid;
-
struct timer_list timer;
- struct sk_buff_head ctrl_transmit;
- struct sk_buff_head intr_transmit;
-
/* Report descriptor */
__u8 *rd_data;
uint rd_size;
-};
-static inline void hidp_schedule(struct hidp_session *session)
-{
- struct sock *ctrl_sk = session->ctrl_sock->sk;
- struct sock *intr_sk = session->intr_sock->sk;
+ /* session data */
+ unsigned char keys[8];
+ unsigned char leds;
- wake_up_interruptible(sk_sleep(ctrl_sk));
- wake_up_interruptible(sk_sleep(intr_sk));
-}
+ /* Used in hidp_get_raw_report() */
+ int waiting_report_type; /* HIDP_DATA_RTYPE_* */
+ int waiting_report_number; /* -1 for not numbered */
+ struct mutex report_mutex;
+ struct sk_buff *report_return;
+ wait_queue_head_t report_queue;
+
+ /* Used in hidp_output_raw_report() */
+ int output_report_success; /* boolean */
+
+ /* temporary input buffer */
+ u8 input_buf[HID_MAX_BUFFER_SIZE];
+};
/* HIDP init defines */
-extern int __init hidp_init_sockets(void);
-extern void __exit hidp_cleanup_sockets(void);
+int __init hidp_init_sockets(void);
+void __exit hidp_cleanup_sockets(void);
#endif /* __HIDP_H */
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 250dfd46237..cb3fdde1968 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -20,25 +20,15 @@
SOFTWARE IS DISCLAIMED.
*/
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/skbuff.h>
-#include <linux/socket.h>
-#include <linux/ioctl.h>
+#include <linux/export.h>
#include <linux/file.h>
-#include <linux/init.h>
-#include <linux/compat.h>
-#include <linux/gfp.h>
-#include <net/sock.h>
#include "hidp.h"
+static struct bt_sock_list hidp_sk_list = {
+ .lock = __RW_LOCK_UNLOCKED(hidp_sk_list.lock)
+};
+
static int hidp_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
@@ -48,6 +38,8 @@ static int hidp_sock_release(struct socket *sock)
if (!sk)
return 0;
+ bt_sock_unlink(&hidp_sk_list, sk);
+
sock_orphan(sk);
sock_put(sk);
@@ -70,7 +62,7 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
switch (cmd) {
case HIDPCONNADD:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
if (copy_from_user(&ca, argp, sizeof(ca)))
return -EFAULT;
@@ -85,31 +77,23 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
return err;
}
- if (csock->sk->sk_state != BT_CONNECTED || isock->sk->sk_state != BT_CONNECTED) {
- sockfd_put(csock);
- sockfd_put(isock);
- return -EBADFD;
- }
+ err = hidp_connection_add(&ca, csock, isock);
+ if (!err && copy_to_user(argp, &ca, sizeof(ca)))
+ err = -EFAULT;
- err = hidp_add_connection(&ca, csock, isock);
- if (!err) {
- if (copy_to_user(argp, &ca, sizeof(ca)))
- err = -EFAULT;
- } else {
- sockfd_put(csock);
- sockfd_put(isock);
- }
+ sockfd_put(csock);
+ sockfd_put(isock);
return err;
case HIDPCONNDEL:
if (!capable(CAP_NET_ADMIN))
- return -EACCES;
+ return -EPERM;
if (copy_from_user(&cd, argp, sizeof(cd)))
return -EFAULT;
- return hidp_del_connection(&cd);
+ return hidp_connection_del(&cd);
case HIDPGETCONNLIST:
if (copy_from_user(&cl, argp, sizeof(cl)))
@@ -140,8 +124,8 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
#ifdef CONFIG_COMPAT
struct compat_hidp_connadd_req {
- int ctrl_sock; // Connected control socket
- int intr_sock; // Connteted interrupt socket
+ int ctrl_sock; /* Connected control socket */
+ int intr_sock; /* Connected interrupt socket */
__u16 parser;
__u16 rd_size;
compat_uptr_t rd_data;
@@ -159,10 +143,10 @@ static int hidp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne
{
if (cmd == HIDPGETCONNLIST) {
struct hidp_connlist_req cl;
- uint32_t uci;
+ u32 uci;
int err;
- if (get_user(cl.cnum, (uint32_t __user *) arg) ||
+ if (get_user(cl.cnum, (u32 __user *) arg) ||
get_user(uci, (u32 __user *) (arg + 4)))
return -EFAULT;
@@ -173,7 +157,7 @@ static int hidp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne
err = hidp_get_connlist(&cl);
- if (!err && put_user(cl.cnum, (uint32_t __user *) arg))
+ if (!err && put_user(cl.cnum, (u32 __user *) arg))
err = -EFAULT;
return err;
@@ -266,6 +250,8 @@ static int hidp_sock_create(struct net *net, struct socket *sock, int protocol,
sk->sk_protocol = protocol;
sk->sk_state = BT_OPEN;
+ bt_sock_link(&hidp_sk_list, sk);
+
return 0;
}
@@ -284,21 +270,30 @@ int __init hidp_init_sockets(void)
return err;
err = bt_sock_register(BTPROTO_HIDP, &hidp_sock_family_ops);
- if (err < 0)
+ if (err < 0) {
+ BT_ERR("Can't register HIDP socket");
+ goto error;
+ }
+
+ err = bt_procfs_init(&init_net, "hidp", &hidp_sk_list, NULL);
+ if (err < 0) {
+ BT_ERR("Failed to create HIDP proc file");
+ bt_sock_unregister(BTPROTO_HIDP);
goto error;
+ }
+
+ BT_INFO("HIDP socket layer initialized");
return 0;
error:
- BT_ERR("Can't register HIDP socket");
proto_unregister(&hidp_proto);
return err;
}
void __exit hidp_cleanup_sockets(void)
{
- if (bt_sock_unregister(BTPROTO_HIDP) < 0)
- BT_ERR("Can't unregister HIDP socket");
-
+ bt_procfs_cleanup(&init_net, "hidp");
+ bt_sock_unregister(BTPROTO_HIDP);
proto_unregister(&hidp_proto);
}
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
deleted file mode 100644
index 675614e38e1..00000000000
--- a/net/bluetooth/l2cap.c
+++ /dev/null
@@ -1,4930 +0,0 @@
-/*
- BlueZ - Bluetooth protocol stack for Linux
- Copyright (C) 2000-2001 Qualcomm Incorporated
- Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
- Copyright (C) 2010 Google Inc.
-
- Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License version 2 as
- published by the Free Software Foundation;
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
- IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
- CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
- WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
- ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
- COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
- SOFTWARE IS DISCLAIMED.
-*/
-
-/* Bluetooth L2CAP core and sockets. */
-
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/socket.h>
-#include <linux/skbuff.h>
-#include <linux/list.h>
-#include <linux/device.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/uaccess.h>
-#include <linux/crc16.h>
-#include <net/sock.h>
-
-#include <asm/system.h>
-#include <asm/unaligned.h>
-
-#include <net/bluetooth/bluetooth.h>
-#include <net/bluetooth/hci_core.h>
-#include <net/bluetooth/l2cap.h>
-
-#define VERSION "2.15"
-
-static int disable_ertm;
-
-static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
-static u8 l2cap_fixed_chan[8] = { 0x02, };
-
-static const struct proto_ops l2cap_sock_ops;
-
-static struct workqueue_struct *_busy_wq;
-
-static struct bt_sock_list l2cap_sk_list = {
- .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
-};
-
-static void l2cap_busy_work(struct work_struct *work);
-
-static void __l2cap_sock_close(struct sock *sk, int reason);
-static void l2cap_sock_close(struct sock *sk);
-static void l2cap_sock_kill(struct sock *sk);
-
-static int l2cap_build_conf_req(struct sock *sk, void *data);
-static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
- u8 code, u8 ident, u16 dlen, void *data);
-
-static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
-
-/* ---- L2CAP timers ---- */
-static void l2cap_sock_set_timer(struct sock *sk, long timeout)
-{
- BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
- sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
-}
-
-static void l2cap_sock_clear_timer(struct sock *sk)
-{
- BT_DBG("sock %p state %d", sk, sk->sk_state);
- sk_stop_timer(sk, &sk->sk_timer);
-}
-
-static void l2cap_sock_timeout(unsigned long arg)
-{
- struct sock *sk = (struct sock *) arg;
- int reason;
-
- BT_DBG("sock %p state %d", sk, sk->sk_state);
-
- bh_lock_sock(sk);
-
- if (sock_owned_by_user(sk)) {
- /* sk is owned by user. Try again later */
- l2cap_sock_set_timer(sk, HZ / 5);
- bh_unlock_sock(sk);
- sock_put(sk);
- return;
- }
-
- if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
- reason = ECONNREFUSED;
- else if (sk->sk_state == BT_CONNECT &&
- l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
- reason = ECONNREFUSED;
- else
- reason = ETIMEDOUT;
-
- __l2cap_sock_close(sk, reason);
-
- bh_unlock_sock(sk);
-
- l2cap_sock_kill(sk);
- sock_put(sk);
-}
-
-/* ---- L2CAP channels ---- */
-static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
-{
- struct sock *s;
- for (s = l->head; s; s = l2cap_pi(s)->next_c) {
- if (l2cap_pi(s)->dcid == cid)
- break;
- }
- return s;
-}
-
-static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
-{
- struct sock *s;
- for (s = l->head; s; s = l2cap_pi(s)->next_c) {
- if (l2cap_pi(s)->scid == cid)
- break;
- }
- return s;
-}
-
-/* Find channel with given SCID.
- * Returns locked socket */
-static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
-{
- struct sock *s;
- read_lock(&l->lock);
- s = __l2cap_get_chan_by_scid(l, cid);
- if (s)
- bh_lock_sock(s);
- read_unlock(&l->lock);
- return s;
-}
-
-static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
-{
- struct sock *s;
- for (s = l->head; s; s = l2cap_pi(s)->next_c) {
- if (l2cap_pi(s)->ident == ident)
- break;
- }
- return s;
-}
-
-static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
-{
- struct sock *s;
- read_lock(&l->lock);
- s = __l2cap_get_chan_by_ident(l, ident);
- if (s)
- bh_lock_sock(s);
- read_unlock(&l->lock);
- return s;
-}
-
-static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
-{
- u16 cid = L2CAP_CID_DYN_START;
-
- for (; cid < L2CAP_CID_DYN_END; cid++) {
- if (!__l2cap_get_chan_by_scid(l, cid))
- return cid;
- }
-
- return 0;
-}
-
-static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
-{
- sock_hold(sk);
-
- if (l->head)
- l2cap_pi(l->head)->prev_c = sk;
-
- l2cap_pi(sk)->next_c = l->head;
- l2cap_pi(sk)->prev_c = NULL;
- l->head = sk;
-}
-
-static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
-{
- struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
-
- write_lock_bh(&l->lock);
- if (sk == l->head)
- l->head = next;
-
- if (next)
- l2cap_pi(next)->prev_c = prev;
- if (prev)
- l2cap_pi(prev)->next_c = next;
- write_unlock_bh(&l->lock);
-
- __sock_put(sk);
-}
-
-static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
-{
- struct l2cap_chan_list *l = &conn->chan_list;
-
- BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
- l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
-
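- /* 0x13 is the HCI "Remote User Terminated Connection" reason code,
- * used here as the default disconnect reason until a more specific
- * one is recorded.
- */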
- conn->disc_reason = 0x13;
-
- l2cap_pi(sk)->conn = conn;
-
- if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
- /* Alloc CID for connection-oriented socket */
- l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
- } else if (sk->sk_type == SOCK_DGRAM) {
- /* Connectionless socket */
- l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
- l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
- l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
- } else {
- /* Raw socket can send/recv signalling messages only */
- l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
- l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
- l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
- }
-
- __l2cap_chan_link(l, sk);
-
- if (parent)
- bt_accept_enqueue(parent, sk);
-}
-
-/* Delete channel.
- * Must be called on the locked socket. */
-static void l2cap_chan_del(struct sock *sk, int err)
-{
- struct l2cap_conn *conn = l2cap_pi(sk)->conn;
- struct sock *parent = bt_sk(sk)->parent;
-
- l2cap_sock_clear_timer(sk);
-
- BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
-
- if (conn) {
- /* Unlink from channel list */
- l2cap_chan_unlink(&conn->chan_list, sk);
- l2cap_pi(sk)->conn = NULL;
- hci_conn_put(conn->hcon);
- }
-
- sk->sk_state = BT_CLOSED;
- sock_set_flag(sk, SOCK_ZAPPED);
-
- if (err)
- sk->sk_err = err;
-
- if (parent) {
- bt_accept_unlink(sk);
- parent->sk_data_ready(parent, 0);
- } else
- sk->sk_state_change(sk);
-
- skb_queue_purge(TX_QUEUE(sk));
-
- if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
- struct srej_list *l, *tmp;
-
- del_timer(&l2cap_pi(sk)->retrans_timer);
- del_timer(&l2cap_pi(sk)->monitor_timer);
- del_timer(&l2cap_pi(sk)->ack_timer);
-
- skb_queue_purge(SREJ_QUEUE(sk));
- skb_queue_purge(BUSY_QUEUE(sk));
-
- list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
- list_del(&l->list);
- kfree(l);
- }
- }
-}
-
-static inline u8 l2cap_get_auth_type(struct sock *sk)
-{
- if (sk->sk_type == SOCK_RAW) {
- switch (l2cap_pi(sk)->sec_level) {
- case BT_SECURITY_HIGH:
- return HCI_AT_DEDICATED_BONDING_MITM;
- case BT_SECURITY_MEDIUM:
- return HCI_AT_DEDICATED_BONDING;
- default:
- return HCI_AT_NO_BONDING;
- }
- } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
- if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
- l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
-
- if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
- return HCI_AT_NO_BONDING_MITM;
- else
- return HCI_AT_NO_BONDING;
- } else {
- switch (l2cap_pi(sk)->sec_level) {
- case BT_SECURITY_HIGH:
- return HCI_AT_GENERAL_BONDING_MITM;
- case BT_SECURITY_MEDIUM:
- return HCI_AT_GENERAL_BONDING;
- default:
- return HCI_AT_NO_BONDING;
- }
- }
-}
-
-/* Service level security */
-static inline int l2cap_check_security(struct sock *sk)
-{
- struct l2cap_conn *conn = l2cap_pi(sk)->conn;
- __u8 auth_type;
-
- auth_type = l2cap_get_auth_type(sk);
-
- return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
- auth_type);
-}
-
-static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
-{
- u8 id;
-
- /* Get next available identifier.
- * 1 - 128 are used by kernel.
- * 129 - 199 are reserved.
- * 200 - 254 are used by utilities like l2ping, etc.
- */
-
- spin_lock_bh(&conn->lock);
-
- if (++conn->tx_ident > 128)
- conn->tx_ident = 1;
-
- id = conn->tx_ident;
-
- spin_unlock_bh(&conn->lock);
-
- return id;
-}
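-
-/* Worked example (editorial sketch): tx_ident wraps within the kernel
- * range, so from tx_ident == 128 the next three calls return 1, 2, 3;
- * idents 129-254 are never produced here and thus cannot clash with
- * the reserved range or with utilities such as l2ping.
- */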
-
-static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
-{
- struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
-
- BT_DBG("code 0x%2.2x", code);
-
- if (!skb)
- return;
-
- hci_send_acl(conn->hcon, skb, 0);
-}
-
-static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
-{
- struct sk_buff *skb;
- struct l2cap_hdr *lh;
- struct l2cap_conn *conn = pi->conn;
- struct sock *sk = (struct sock *)pi;
- int count, hlen = L2CAP_HDR_SIZE + 2;
-
- if (sk->sk_state != BT_CONNECTED)
- return;
-
- if (pi->fcs == L2CAP_FCS_CRC16)
- hlen += 2;
-
- BT_DBG("pi %p, control 0x%2.2x", pi, control);
-
- count = min_t(unsigned int, conn->mtu, hlen);
- control |= L2CAP_CTRL_FRAME_TYPE;
-
- if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
- control |= L2CAP_CTRL_FINAL;
- pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
- }
-
- if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
- control |= L2CAP_CTRL_POLL;
- pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
- }
-
- skb = bt_skb_alloc(count, GFP_ATOMIC);
- if (!skb)
- return;
-
- lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
- lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
- lh->cid = cpu_to_le16(pi->dcid);
- put_unaligned_le16(control, skb_put(skb, 2));
-
- if (pi->fcs == L2CAP_FCS_CRC16) {
- u16 fcs = crc16(0, (u8 *)lh, count - 2);
- put_unaligned_le16(fcs, skb_put(skb, 2));
- }
-
- hci_send_acl(pi->conn->hcon, skb, 0);
-}
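-
-/* Worked example (editorial sketch, not part of the original file):
- * acknowledging frames up to ReqSeq 5 with the Final bit set means
- * calling l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY |
- * L2CAP_CTRL_FINAL | (5 << L2CAP_CTRL_REQSEQ_SHIFT)); the function
- * itself ORs in L2CAP_CTRL_FRAME_TYPE, yielding the 16-bit control
- * word 0x0581 on the wire.
- */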
-
-static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
-{
- if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
- control |= L2CAP_SUPER_RCV_NOT_READY;
- pi->conn_state |= L2CAP_CONN_RNR_SENT;
- } else
- control |= L2CAP_SUPER_RCV_READY;
-
- control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-
- l2cap_send_sframe(pi, control);
-}
-
-static inline int __l2cap_no_conn_pending(struct sock *sk)
-{
- return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
-}
-
-static void l2cap_do_start(struct sock *sk)
-{
- struct l2cap_conn *conn = l2cap_pi(sk)->conn;
-
- if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
- if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
- return;
-
- if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
- struct l2cap_conn_req req;
- req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
- req.psm = l2cap_pi(sk)->psm;
-
- l2cap_pi(sk)->ident = l2cap_get_ident(conn);
- l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
-
- l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
- L2CAP_CONN_REQ, sizeof(req), &req);
- }
- } else {
- struct l2cap_info_req req;
- req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
-
- conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
- conn->info_ident = l2cap_get_ident(conn);
-
- mod_timer(&conn->info_timer, jiffies +
- msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
-
- l2cap_send_cmd(conn, conn->info_ident,
- L2CAP_INFO_REQ, sizeof(req), &req);
- }
-}
-
-static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
-{
- u32 local_feat_mask = l2cap_feat_mask;
- if (!disable_ertm)
- local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
-
- switch (mode) {
- case L2CAP_MODE_ERTM:
- return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
- case L2CAP_MODE_STREAMING:
- return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
- default:
- return 0x00;
- }
-}
-
-static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
-{
- struct l2cap_disconn_req req;
-
- if (!conn)
- return;
-
- skb_queue_purge(TX_QUEUE(sk));
-
- if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
- del_timer(&l2cap_pi(sk)->retrans_timer);
- del_timer(&l2cap_pi(sk)->monitor_timer);
- del_timer(&l2cap_pi(sk)->ack_timer);
- }
-
- req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
- req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
- l2cap_send_cmd(conn, l2cap_get_ident(conn),
- L2CAP_DISCONN_REQ, sizeof(req), &req);
-
- sk->sk_state = BT_DISCONN;
- sk->sk_err = err;
-}
-
-/* ---- L2CAP connections ---- */
-static void l2cap_conn_start(struct l2cap_conn *conn)
-{
- struct l2cap_chan_list *l = &conn->chan_list;
- struct sock_del_list del, *tmp1, *tmp2;
- struct sock *sk;
-
- BT_DBG("conn %p", conn);
-
- INIT_LIST_HEAD(&del.list);
-
- read_lock(&l->lock);
-
- for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
- bh_lock_sock(sk);
-
- if (sk->sk_type != SOCK_SEQPACKET &&
- sk->sk_type != SOCK_STREAM) {
- bh_unlock_sock(sk);
- continue;
- }
-
- if (sk->sk_state == BT_CONNECT) {
- struct l2cap_conn_req req;
-
- if (!l2cap_check_security(sk) ||
- !__l2cap_no_conn_pending(sk)) {
- bh_unlock_sock(sk);
- continue;
- }
-
- if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
- conn->feat_mask)
- && l2cap_pi(sk)->conf_state &
- L2CAP_CONF_STATE2_DEVICE) {
- tmp1 = kzalloc(sizeof(struct sock_del_list),
- GFP_ATOMIC);
- if (!tmp1) {
- bh_unlock_sock(sk);
- continue;
- }
- tmp1->sk = sk;
- list_add_tail(&tmp1->list, &del.list);
- bh_unlock_sock(sk);
- continue;
- }
-
- req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
- req.psm = l2cap_pi(sk)->psm;
-
- l2cap_pi(sk)->ident = l2cap_get_ident(conn);
- l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
-
- l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
- L2CAP_CONN_REQ, sizeof(req), &req);
-
- } else if (sk->sk_state == BT_CONNECT2) {
- struct l2cap_conn_rsp rsp;
- char buf[128];
- rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
- rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
-
- if (l2cap_check_security(sk)) {
- if (bt_sk(sk)->defer_setup) {
- struct sock *parent = bt_sk(sk)->parent;
- rsp.result = cpu_to_le16(L2CAP_CR_PEND);
- rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
- parent->sk_data_ready(parent, 0);
-
- } else {
- sk->sk_state = BT_CONFIG;
- rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
- }
- } else {
- rsp.result = cpu_to_le16(L2CAP_CR_PEND);
- rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
- }
-
- l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
- L2CAP_CONN_RSP, sizeof(rsp), &rsp);
-
- if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
- rsp.result != L2CAP_CR_SUCCESS) {
- bh_unlock_sock(sk);
- continue;
- }
-
- l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
- l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(sk, buf), buf);
- l2cap_pi(sk)->num_conf_req++;
- }
-
- bh_unlock_sock(sk);
- }
-
- read_unlock(&l->lock);
-
- list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
- bh_lock_sock(tmp1->sk);
- __l2cap_sock_close(tmp1->sk, ECONNRESET);
- bh_unlock_sock(tmp1->sk);
- list_del(&tmp1->list);
- kfree(tmp1);
- }
-}
-
-static void l2cap_conn_ready(struct l2cap_conn *conn)
-{
- struct l2cap_chan_list *l = &conn->chan_list;
- struct sock *sk;
-
- BT_DBG("conn %p", conn);
-
- read_lock(&l->lock);
-
- for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
- bh_lock_sock(sk);
-
- if (sk->sk_type != SOCK_SEQPACKET &&
- sk->sk_type != SOCK_STREAM) {
- l2cap_sock_clear_timer(sk);
- sk->sk_state = BT_CONNECTED;
- sk->sk_state_change(sk);
- } else if (sk->sk_state == BT_CONNECT)
- l2cap_do_start(sk);
-
- bh_unlock_sock(sk);
- }
-
- read_unlock(&l->lock);
-}
-
- /* Notify sockets that we cannot guarantee reliability anymore */
-static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
-{
- struct l2cap_chan_list *l = &conn->chan_list;
- struct sock *sk;
-
- BT_DBG("conn %p", conn);
-
- read_lock(&l->lock);
-
- for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
- if (l2cap_pi(sk)->force_reliable)
- sk->sk_err = err;
- }
-
- read_unlock(&l->lock);
-}
-
-static void l2cap_info_timeout(unsigned long arg)
-{
- struct l2cap_conn *conn = (void *) arg;
-
- conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
- conn->info_ident = 0;
-
- l2cap_conn_start(conn);
-}
-
-static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
-{
- struct l2cap_conn *conn = hcon->l2cap_data;
-
- if (conn || status)
- return conn;
-
- conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
- if (!conn)
- return NULL;
-
- hcon->l2cap_data = conn;
- conn->hcon = hcon;
-
- BT_DBG("hcon %p conn %p", hcon, conn);
-
- conn->mtu = hcon->hdev->acl_mtu;
- conn->src = &hcon->hdev->bdaddr;
- conn->dst = &hcon->dst;
-
- conn->feat_mask = 0;
-
- spin_lock_init(&conn->lock);
- rwlock_init(&conn->chan_list.lock);
-
- setup_timer(&conn->info_timer, l2cap_info_timeout,
- (unsigned long) conn);
-
- conn->disc_reason = 0x13;
-
- return conn;
-}
-
-static void l2cap_conn_del(struct hci_conn *hcon, int err)
-{
- struct l2cap_conn *conn = hcon->l2cap_data;
- struct sock *sk;
-
- if (!conn)
- return;
-
- BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
-
- kfree_skb(conn->rx_skb);
-
- /* Kill channels */
- while ((sk = conn->chan_list.head)) {
- bh_lock_sock(sk);
- l2cap_chan_del(sk, err);
- bh_unlock_sock(sk);
- l2cap_sock_kill(sk);
- }
-
- if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
- del_timer_sync(&conn->info_timer);
-
- hcon->l2cap_data = NULL;
- kfree(conn);
-}
-
-static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
-{
- struct l2cap_chan_list *l = &conn->chan_list;
- write_lock_bh(&l->lock);
- __l2cap_chan_add(conn, sk, parent);
- write_unlock_bh(&l->lock);
-}
-
-/* ---- Socket interface ---- */
-static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
-{
- struct sock *sk;
- struct hlist_node *node;
- sk_for_each(sk, node, &l2cap_sk_list.head)
- if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
- goto found;
- sk = NULL;
-found:
- return sk;
-}
-
-/* Find socket with psm and source bdaddr.
- * Returns closest match.
- */
-static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
-{
- struct sock *sk = NULL, *sk1 = NULL;
- struct hlist_node *node;
-
- read_lock(&l2cap_sk_list.lock);
-
- sk_for_each(sk, node, &l2cap_sk_list.head) {
- if (state && sk->sk_state != state)
- continue;
-
- if (l2cap_pi(sk)->psm == psm) {
- /* Exact match. */
- if (!bacmp(&bt_sk(sk)->src, src))
- break;
-
- /* Closest match */
- if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
- sk1 = sk;
- }
- }
-
- read_unlock(&l2cap_sk_list.lock);
-
- return node ? sk : sk1;
-}
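-
-/* Usage sketch (editorial, not in the original file): an incoming
- * connect request for PSM 0x1001 arriving on a device with bdaddr A
- * first matches a listener bound to (0x1001, A); failing that, it
- * falls back to a wildcard listener bound to (0x1001, BDADDR_ANY).
- * Only if neither exists does the caller reject the request.
- */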
-
-static void l2cap_sock_destruct(struct sock *sk)
-{
- BT_DBG("sk %p", sk);
-
- skb_queue_purge(&sk->sk_receive_queue);
- skb_queue_purge(&sk->sk_write_queue);
-}
-
-static void l2cap_sock_cleanup_listen(struct sock *parent)
-{
- struct sock *sk;
-
- BT_DBG("parent %p", parent);
-
- /* Close not yet accepted channels */
- while ((sk = bt_accept_dequeue(parent, NULL)))
- l2cap_sock_close(sk);
-
- parent->sk_state = BT_CLOSED;
- sock_set_flag(parent, SOCK_ZAPPED);
-}
-
-/* Kill socket (only if zapped and orphan)
- * Must be called on unlocked socket.
- */
-static void l2cap_sock_kill(struct sock *sk)
-{
- if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
- return;
-
- BT_DBG("sk %p state %d", sk, sk->sk_state);
-
- /* Kill poor orphan */
- bt_sock_unlink(&l2cap_sk_list, sk);
- sock_set_flag(sk, SOCK_DEAD);
- sock_put(sk);
-}
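-
-/* Usage sketch (editorial): the close/kill pair is always split so the
- * lock rules hold -- __l2cap_sock_close() runs under the socket lock
- * and only marks the socket zapped; l2cap_sock_kill() then runs on the
- * unlocked socket and drops the final reference once it is orphaned:
- *
- * lock_sock(sk);
- * __l2cap_sock_close(sk, ECONNRESET);
- * release_sock(sk);
- * l2cap_sock_kill(sk);
- */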
-
-static void __l2cap_sock_close(struct sock *sk, int reason)
-{
- BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
-
- switch (sk->sk_state) {
- case BT_LISTEN:
- l2cap_sock_cleanup_listen(sk);
- break;
-
- case BT_CONNECTED:
- case BT_CONFIG:
- if (sk->sk_type == SOCK_SEQPACKET ||
- sk->sk_type == SOCK_STREAM) {
- struct l2cap_conn *conn = l2cap_pi(sk)->conn;
-
- l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
- l2cap_send_disconn_req(conn, sk, reason);
- } else
- l2cap_chan_del(sk, reason);
- break;
-
- case BT_CONNECT2:
- if (sk->sk_type == SOCK_SEQPACKET ||
- sk->sk_type == SOCK_STREAM) {
- struct l2cap_conn *conn = l2cap_pi(sk)->conn;
- struct l2cap_conn_rsp rsp;
- __u16 result;
-
- if (bt_sk(sk)->defer_setup)
- result = L2CAP_CR_SEC_BLOCK;
- else
- result = L2CAP_CR_BAD_PSM;
- sk->sk_state = BT_DISCONN;
-
- rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
- rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
- rsp.result = cpu_to_le16(result);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
- l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
- L2CAP_CONN_RSP, sizeof(rsp), &rsp);
- } else
- l2cap_chan_del(sk, reason);
- break;
-
- case BT_CONNECT:
- case BT_DISCONN:
- l2cap_chan_del(sk, reason);
- break;
-
- default:
- sock_set_flag(sk, SOCK_ZAPPED);
- break;
- }
-}
-
-/* Must be called on unlocked socket. */
-static void l2cap_sock_close(struct sock *sk)
-{
- l2cap_sock_clear_timer(sk);
- lock_sock(sk);
- __l2cap_sock_close(sk, ECONNRESET);
- release_sock(sk);
- l2cap_sock_kill(sk);
-}
-
-static void l2cap_sock_init(struct sock *sk, struct sock *parent)
-{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
-
- BT_DBG("sk %p", sk);
-
- if (parent) {
- sk->sk_type = parent->sk_type;
- bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
-
- pi->imtu = l2cap_pi(parent)->imtu;
- pi->omtu = l2cap_pi(parent)->omtu;
- pi->conf_state = l2cap_pi(parent)->conf_state;
- pi->mode = l2cap_pi(parent)->mode;
- pi->fcs = l2cap_pi(parent)->fcs;
- pi->max_tx = l2cap_pi(parent)->max_tx;
- pi->tx_win = l2cap_pi(parent)->tx_win;
- pi->sec_level = l2cap_pi(parent)->sec_level;
- pi->role_switch = l2cap_pi(parent)->role_switch;
- pi->force_reliable = l2cap_pi(parent)->force_reliable;
- } else {
- pi->imtu = L2CAP_DEFAULT_MTU;
- pi->omtu = 0;
- if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
- pi->mode = L2CAP_MODE_ERTM;
- pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
- } else {
- pi->mode = L2CAP_MODE_BASIC;
- }
- pi->max_tx = L2CAP_DEFAULT_MAX_TX;
- pi->fcs = L2CAP_FCS_CRC16;
- pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
- pi->sec_level = BT_SECURITY_LOW;
- pi->role_switch = 0;
- pi->force_reliable = 0;
- }
-
- /* Default config options */
- pi->conf_len = 0;
- pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
- skb_queue_head_init(TX_QUEUE(sk));
- skb_queue_head_init(SREJ_QUEUE(sk));
- skb_queue_head_init(BUSY_QUEUE(sk));
- INIT_LIST_HEAD(SREJ_LIST(sk));
-}
-
-static struct proto l2cap_proto = {
- .name = "L2CAP",
- .owner = THIS_MODULE,
- .obj_size = sizeof(struct l2cap_pinfo)
-};
-
-static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
-{
- struct sock *sk;
-
- sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
- if (!sk)
- return NULL;
-
- sock_init_data(sock, sk);
- INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
-
- sk->sk_destruct = l2cap_sock_destruct;
- sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
-
- sock_reset_flag(sk, SOCK_ZAPPED);
-
- sk->sk_protocol = proto;
- sk->sk_state = BT_OPEN;
-
- setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
-
- bt_sock_link(&l2cap_sk_list, sk);
- return sk;
-}
-
-static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
- int kern)
-{
- struct sock *sk;
-
- BT_DBG("sock %p", sock);
-
- sock->state = SS_UNCONNECTED;
-
- if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
- sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
- return -ESOCKTNOSUPPORT;
-
- if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
- return -EPERM;
-
- sock->ops = &l2cap_sock_ops;
-
- sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
- if (!sk)
- return -ENOMEM;
-
- l2cap_sock_init(sk, NULL);
- return 0;
-}
-
-static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
-{
- struct sock *sk = sock->sk;
- struct sockaddr_l2 la;
- int len, err = 0;
-
- BT_DBG("sk %p", sk);
-
- if (!addr || addr->sa_family != AF_BLUETOOTH)
- return -EINVAL;
-
- memset(&la, 0, sizeof(la));
- len = min_t(unsigned int, sizeof(la), alen);
- memcpy(&la, addr, len);
-
- if (la.l2_cid)
- return -EINVAL;
-
- lock_sock(sk);
-
- if (sk->sk_state != BT_OPEN) {
- err = -EBADFD;
- goto done;
- }
-
- if (la.l2_psm) {
- __u16 psm = __le16_to_cpu(la.l2_psm);
-
- /* PSM must be odd and lsb of upper byte must be 0 */
- if ((psm & 0x0101) != 0x0001) {
- err = -EINVAL;
- goto done;
- }
-
- /* Restrict usage of well-known PSMs */
- if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) {
- err = -EACCES;
- goto done;
- }
- }
-
- write_lock_bh(&l2cap_sk_list.lock);
-
- if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
- err = -EADDRINUSE;
- } else {
- /* Save source address */
- bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
- l2cap_pi(sk)->psm = la.l2_psm;
- l2cap_pi(sk)->sport = la.l2_psm;
- sk->sk_state = BT_BOUND;
-
- if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
- __le16_to_cpu(la.l2_psm) == 0x0003)
- l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
- }
-
- write_unlock_bh(&l2cap_sk_list.lock);
-
-done:
- release_sock(sk);
- return err;
-}
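-
-/* Editorial sketch of the PSM validity rule used in bind() above and
- * connect() below: a valid PSM is odd (bit 0 of the low octet set) and
- * has bit 0 of the high octet clear, which is exactly the
- * (psm & 0x0101) == 0x0001 test. For instance 0x1001 and 0x0003 pass,
- * while 0x1002 (even) and 0x1101 (bit 8 set) are rejected.
- */
-static inline bool l2cap_valid_psm(u16 psm)
-{
- return (psm & 0x0101) == 0x0001;
-}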
-
-static int l2cap_do_connect(struct sock *sk)
-{
- bdaddr_t *src = &bt_sk(sk)->src;
- bdaddr_t *dst = &bt_sk(sk)->dst;
- struct l2cap_conn *conn;
- struct hci_conn *hcon;
- struct hci_dev *hdev;
- __u8 auth_type;
- int err;
-
- BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
- l2cap_pi(sk)->psm);
-
- hdev = hci_get_route(dst, src);
- if (!hdev)
- return -EHOSTUNREACH;
-
- hci_dev_lock_bh(hdev);
-
- err = -ENOMEM;
-
- auth_type = l2cap_get_auth_type(sk);
-
- hcon = hci_connect(hdev, ACL_LINK, dst,
- l2cap_pi(sk)->sec_level, auth_type);
- if (!hcon)
- goto done;
-
- conn = l2cap_conn_add(hcon, 0);
- if (!conn) {
- hci_conn_put(hcon);
- goto done;
- }
-
- err = 0;
-
- /* Update source addr of the socket */
- bacpy(src, conn->src);
-
- l2cap_chan_add(conn, sk, NULL);
-
- sk->sk_state = BT_CONNECT;
- l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
-
- if (hcon->state == BT_CONNECTED) {
- if (sk->sk_type != SOCK_SEQPACKET &&
- sk->sk_type != SOCK_STREAM) {
- l2cap_sock_clear_timer(sk);
- if (l2cap_check_security(sk))
- sk->sk_state = BT_CONNECTED;
- } else
- l2cap_do_start(sk);
- }
-
-done:
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
- return err;
-}
-
-static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
-{
- struct sock *sk = sock->sk;
- struct sockaddr_l2 la;
- int len, err = 0;
-
- BT_DBG("sk %p", sk);
-
- if (!addr || alen < sizeof(addr->sa_family) ||
- addr->sa_family != AF_BLUETOOTH)
- return -EINVAL;
-
- memset(&la, 0, sizeof(la));
- len = min_t(unsigned int, sizeof(la), alen);
- memcpy(&la, addr, len);
-
- if (la.l2_cid)
- return -EINVAL;
-
- lock_sock(sk);
-
- if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
- && !la.l2_psm) {
- err = -EINVAL;
- goto done;
- }
-
- switch (l2cap_pi(sk)->mode) {
- case L2CAP_MODE_BASIC:
- break;
- case L2CAP_MODE_ERTM:
- case L2CAP_MODE_STREAMING:
- if (!disable_ertm)
- break;
- /* fall through */
- default:
- err = -ENOTSUPP;
- goto done;
- }
-
- switch (sk->sk_state) {
- case BT_CONNECT:
- case BT_CONNECT2:
- case BT_CONFIG:
- /* Already connecting */
- goto wait;
-
- case BT_CONNECTED:
- /* Already connected */
- err = -EISCONN;
- goto done;
-
- case BT_OPEN:
- case BT_BOUND:
- /* Can connect */
- break;
-
- default:
- err = -EBADFD;
- goto done;
- }
-
- /* PSM must be odd and lsb of upper byte must be 0 */
- if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
- sk->sk_type != SOCK_RAW) {
- err = -EINVAL;
- goto done;
- }
-
- /* Set destination address and psm */
- bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
- l2cap_pi(sk)->psm = la.l2_psm;
-
- err = l2cap_do_connect(sk);
- if (err)
- goto done;
-
-wait:
- err = bt_sock_wait_state(sk, BT_CONNECTED,
- sock_sndtimeo(sk, flags & O_NONBLOCK));
-done:
- release_sock(sk);
- return err;
-}
-
-static int l2cap_sock_listen(struct socket *sock, int backlog)
-{
- struct sock *sk = sock->sk;
- int err = 0;
-
- BT_DBG("sk %p backlog %d", sk, backlog);
-
- lock_sock(sk);
-
- if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
- || sk->sk_state != BT_BOUND) {
- err = -EBADFD;
- goto done;
- }
-
- switch (l2cap_pi(sk)->mode) {
- case L2CAP_MODE_BASIC:
- break;
- case L2CAP_MODE_ERTM:
- case L2CAP_MODE_STREAMING:
- if (!disable_ertm)
- break;
- /* fall through */
- default:
- err = -ENOTSUPP;
- goto done;
- }
-
- if (!l2cap_pi(sk)->psm) {
- bdaddr_t *src = &bt_sk(sk)->src;
- u16 psm;
-
- err = -EINVAL;
-
- write_lock_bh(&l2cap_sk_list.lock);
-
- for (psm = 0x1001; psm < 0x1100; psm += 2)
- if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
- l2cap_pi(sk)->psm = cpu_to_le16(psm);
- l2cap_pi(sk)->sport = cpu_to_le16(psm);
- err = 0;
- break;
- }
-
- write_unlock_bh(&l2cap_sk_list.lock);
-
- if (err < 0)
- goto done;
- }
-
- sk->sk_max_ack_backlog = backlog;
- sk->sk_ack_backlog = 0;
- sk->sk_state = BT_LISTEN;
-
-done:
- release_sock(sk);
- return err;
-}
-
-static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
-{
- DECLARE_WAITQUEUE(wait, current);
- struct sock *sk = sock->sk, *nsk;
- long timeo;
- int err = 0;
-
- lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
-
- if (sk->sk_state != BT_LISTEN) {
- err = -EBADFD;
- goto done;
- }
-
- timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
-
- BT_DBG("sk %p timeo %ld", sk, timeo);
-
- /* Wait for an incoming connection. (wake-one). */
- add_wait_queue_exclusive(sk_sleep(sk), &wait);
- while (!(nsk = bt_accept_dequeue(sk, newsock))) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (!timeo) {
- err = -EAGAIN;
- break;
- }
-
- release_sock(sk);
- timeo = schedule_timeout(timeo);
- lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
-
- if (sk->sk_state != BT_LISTEN) {
- err = -EBADFD;
- break;
- }
-
- if (signal_pending(current)) {
- err = sock_intr_errno(timeo);
- break;
- }
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(sk_sleep(sk), &wait);
-
- if (err)
- goto done;
-
- newsock->state = SS_CONNECTED;
-
- BT_DBG("new socket %p", nsk);
-
-done:
- release_sock(sk);
- return err;
-}
-
-static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
-{
- struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
- struct sock *sk = sock->sk;
-
- BT_DBG("sock %p, sk %p", sock, sk);
-
- addr->sa_family = AF_BLUETOOTH;
- *len = sizeof(struct sockaddr_l2);
-
- if (peer) {
- la->l2_psm = l2cap_pi(sk)->psm;
- bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
- la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
- } else {
- la->l2_psm = l2cap_pi(sk)->sport;
- bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
- la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
- }
-
- return 0;
-}
-
-static int __l2cap_wait_ack(struct sock *sk)
-{
- DECLARE_WAITQUEUE(wait, current);
- int err = 0;
- int timeo = HZ/5;
-
- add_wait_queue(sk_sleep(sk), &wait);
- while (l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn) {
- set_current_state(TASK_INTERRUPTIBLE);
-
- if (!timeo)
- timeo = HZ/5;
-
- if (signal_pending(current)) {
- err = sock_intr_errno(timeo);
- break;
- }
-
- release_sock(sk);
- timeo = schedule_timeout(timeo);
- lock_sock(sk);
-
- err = sock_error(sk);
- if (err)
- break;
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(sk_sleep(sk), &wait);
- return err;
-}
-
-static void l2cap_monitor_timeout(unsigned long arg)
-{
- struct sock *sk = (void *) arg;
-
- BT_DBG("sk %p", sk);
-
- bh_lock_sock(sk);
- if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
- l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
- bh_unlock_sock(sk);
- return;
- }
-
- l2cap_pi(sk)->retry_count++;
- __mod_monitor_timer();
-
- l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
- bh_unlock_sock(sk);
-}
-
-static void l2cap_retrans_timeout(unsigned long arg)
-{
- struct sock *sk = (void *) arg;
-
- BT_DBG("sk %p", sk);
-
- bh_lock_sock(sk);
- l2cap_pi(sk)->retry_count = 1;
- __mod_monitor_timer();
-
- l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
-
- l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
- bh_unlock_sock(sk);
-}
-
-static void l2cap_drop_acked_frames(struct sock *sk)
-{
- struct sk_buff *skb;
-
- while ((skb = skb_peek(TX_QUEUE(sk))) &&
- l2cap_pi(sk)->unacked_frames) {
- if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
- break;
-
- skb = skb_dequeue(TX_QUEUE(sk));
- kfree_skb(skb);
-
- l2cap_pi(sk)->unacked_frames--;
- }
-
- if (!l2cap_pi(sk)->unacked_frames)
- del_timer(&l2cap_pi(sk)->retrans_timer);
-}
-
-static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
-{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
-
- BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
-
- hci_send_acl(pi->conn->hcon, skb, 0);
-}
-
-static void l2cap_streaming_send(struct sock *sk)
-{
- struct sk_buff *skb;
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- u16 control, fcs;
-
- while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
- control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
- control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
- put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
-
- if (pi->fcs == L2CAP_FCS_CRC16) {
- fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
- put_unaligned_le16(fcs, skb->data + skb->len - 2);
- }
-
- l2cap_do_send(sk, skb);
-
- pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
- }
-}
-
-static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
-{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- struct sk_buff *skb, *tx_skb;
- u16 control, fcs;
-
- skb = skb_peek(TX_QUEUE(sk));
- if (!skb)
- return;
-
- do {
- if (bt_cb(skb)->tx_seq == tx_seq)
- break;
-
- if (skb_queue_is_last(TX_QUEUE(sk), skb))
- return;
-
- } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
-
- if (pi->remote_max_tx &&
- bt_cb(skb)->retries == pi->remote_max_tx) {
- l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
- return;
- }
-
- tx_skb = skb_clone(skb, GFP_ATOMIC);
- if (!tx_skb)
- return;
-
- bt_cb(skb)->retries++;
- control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
-
- if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
- control |= L2CAP_CTRL_FINAL;
- pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
- }
-
- control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
- | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
-
- put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
-
- if (pi->fcs == L2CAP_FCS_CRC16) {
- fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
- put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
- }
-
- l2cap_do_send(sk, tx_skb);
-}
-
-static int l2cap_ertm_send(struct sock *sk)
-{
- struct sk_buff *skb, *tx_skb;
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- u16 control, fcs;
- int nsent = 0;
-
- if (sk->sk_state != BT_CONNECTED)
- return -ENOTCONN;
-
- while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
-
- if (pi->remote_max_tx &&
- bt_cb(skb)->retries == pi->remote_max_tx) {
- l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
- break;
- }
-
- tx_skb = skb_clone(skb, GFP_ATOMIC);
- if (!tx_skb)
- break;
-
- bt_cb(skb)->retries++;
-
- control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
- control &= L2CAP_CTRL_SAR;
-
- if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
- control |= L2CAP_CTRL_FINAL;
- pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
- }
- control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
- | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
- put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
-
- if (pi->fcs == L2CAP_FCS_CRC16) {
- fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
- put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
- }
-
- l2cap_do_send(sk, tx_skb);
-
- __mod_retrans_timer();
-
- bt_cb(skb)->tx_seq = pi->next_tx_seq;
- pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
-
- pi->unacked_frames++;
- pi->frames_sent++;
-
- if (skb_queue_is_last(TX_QUEUE(sk), skb))
- sk->sk_send_head = NULL;
- else
- sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
-
- nsent++;
- }
-
- return nsent;
-}
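-
-/* Worked example (editorial, not in the original file): an I-frame
- * carrying TxSeq 3 that also acknowledges the peer up to ReqSeq 7 has
- *
- * control = (3 << L2CAP_CTRL_TXSEQ_SHIFT) | (7 << L2CAP_CTRL_REQSEQ_SHIFT)
- *
- * i.e. 0x0706, with bit 0 clear to mark it an I-frame; sequence
- * numbers are modulo 64, which is why next_tx_seq advances as
- * (next_tx_seq + 1) % 64.
- */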
-
-static int l2cap_retransmit_frames(struct sock *sk)
-{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- int ret;
-
- if (!skb_queue_empty(TX_QUEUE(sk)))
- sk->sk_send_head = TX_QUEUE(sk)->next;
-
- pi->next_tx_seq = pi->expected_ack_seq;
- ret = l2cap_ertm_send(sk);
- return ret;
-}
-
-static void l2cap_send_ack(struct l2cap_pinfo *pi)
-{
- struct sock *sk = (struct sock *)pi;
- u16 control = 0;
-
- control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-
- if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
- control |= L2CAP_SUPER_RCV_NOT_READY;
- pi->conn_state |= L2CAP_CONN_RNR_SENT;
- l2cap_send_sframe(pi, control);
- return;
- }
-
- if (l2cap_ertm_send(sk) > 0)
- return;
-
- control |= L2CAP_SUPER_RCV_READY;
- l2cap_send_sframe(pi, control);
-}
-
-static void l2cap_send_srejtail(struct sock *sk)
-{
- struct srej_list *tail;
- u16 control;
-
- control = L2CAP_SUPER_SELECT_REJECT;
- control |= L2CAP_CTRL_FINAL;
-
- tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
- control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-
- l2cap_send_sframe(l2cap_pi(sk), control);
-}
-
-static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
-{
- struct l2cap_conn *conn = l2cap_pi(sk)->conn;
- struct sk_buff **frag;
- int err, sent = 0;
-
- if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
- return -EFAULT;
-
- sent += count;
- len -= count;
-
- /* Continuation fragments (no L2CAP header) */
- frag = &skb_shinfo(skb)->frag_list;
- while (len) {
- count = min_t(unsigned int, conn->mtu, len);
-
- *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
- if (!*frag)
- return err;
- if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
- return -EFAULT;
-
- sent += count;
- len -= count;
-
- frag = &(*frag)->next;
- }
-
- return sent;
-}
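-
-/* Editorial sketch: for an SDU larger than the ACL MTU, the first skb
- * carries the L2CAP header plus the caller-supplied count of payload
- * bytes, and every remaining chunk (at most conn->mtu each) is chained
- * off skb_shinfo(skb)->frag_list with no header of its own. E.g. with
- * conn->mtu == 339 and a 4-byte basic-mode header, a 1000-byte write
- * becomes a 335-byte headered first fragment followed by continuation
- * fragments of 339 and 326 bytes.
- */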
-
-static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
-{
- struct l2cap_conn *conn = l2cap_pi(sk)->conn;
- struct sk_buff *skb;
- int err, count, hlen = L2CAP_HDR_SIZE + 2;
- struct l2cap_hdr *lh;
-
- BT_DBG("sk %p len %d", sk, (int)len);
-
- count = min_t(unsigned int, (conn->mtu - hlen), len);
- skb = bt_skb_send_alloc(sk, count + hlen,
- msg->msg_flags & MSG_DONTWAIT, &err);
- if (!skb)
- return ERR_PTR(err);
-
- /* Create L2CAP header */
- lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
- lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
- lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
- put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
-
- err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
- if (unlikely(err < 0)) {
- kfree_skb(skb);
- return ERR_PTR(err);
- }
- return skb;
-}
-
-static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
-{
- struct l2cap_conn *conn = l2cap_pi(sk)->conn;
- struct sk_buff *skb;
- int err, count, hlen = L2CAP_HDR_SIZE;
- struct l2cap_hdr *lh;
-
- BT_DBG("sk %p len %d", sk, (int)len);
-
- count = min_t(unsigned int, (conn->mtu - hlen), len);
- skb = bt_skb_send_alloc(sk, count + hlen,
- msg->msg_flags & MSG_DONTWAIT, &err);
- if (!skb)
- return ERR_PTR(err);
-
- /* Create L2CAP header */
- lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
- lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
- lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
-
- err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
- if (unlikely(err < 0)) {
- kfree_skb(skb);
- return ERR_PTR(err);
- }
- return skb;
-}
-
-static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
-{
- struct l2cap_conn *conn = l2cap_pi(sk)->conn;
- struct sk_buff *skb;
- int err, count, hlen = L2CAP_HDR_SIZE + 2;
- struct l2cap_hdr *lh;
-
- BT_DBG("sk %p len %d", sk, (int)len);
-
- if (!conn)
- return ERR_PTR(-ENOTCONN);
-
- if (sdulen)
- hlen += 2;
-
- if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
- hlen += 2;
-
- count = min_t(unsigned int, (conn->mtu - hlen), len);
- skb = bt_skb_send_alloc(sk, count + hlen,
- msg->msg_flags & MSG_DONTWAIT, &err);
- if (!skb)
- return ERR_PTR(err);
-
- /* Create L2CAP header */
- lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
- lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
- lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
- put_unaligned_le16(control, skb_put(skb, 2));
- if (sdulen)
- put_unaligned_le16(sdulen, skb_put(skb, 2));
-
- err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
- if (unlikely(err < 0)) {
- kfree_skb(skb);
- return ERR_PTR(err);
- }
-
- if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
- put_unaligned_le16(0, skb_put(skb, 2));
-
- bt_cb(skb)->retries = 0;
- return skb;
-}
-
-static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
-{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- struct sk_buff *skb;
- struct sk_buff_head sar_queue;
- u16 control;
- size_t size = 0;
-
- skb_queue_head_init(&sar_queue);
- control = L2CAP_SDU_START;
- skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- __skb_queue_tail(&sar_queue, skb);
- len -= pi->remote_mps;
- size += pi->remote_mps;
-
- while (len > 0) {
- size_t buflen;
-
- if (len > pi->remote_mps) {
- control = L2CAP_SDU_CONTINUE;
- buflen = pi->remote_mps;
- } else {
- control = L2CAP_SDU_END;
- buflen = len;
- }
-
- skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
- if (IS_ERR(skb)) {
- skb_queue_purge(&sar_queue);
- return PTR_ERR(skb);
- }
-
- __skb_queue_tail(&sar_queue, skb);
- len -= buflen;
- size += buflen;
- }
- skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
- if (sk->sk_send_head == NULL)
- sk->sk_send_head = sar_queue.next;
-
- return size;
-}
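-
-/* Worked example (editorial): with remote_mps == 400, a 1000-byte SDU
- * becomes three I-frames -- a START PDU carrying 400 bytes plus a
- * 2-byte SDU-length field of 1000, a CONTINUE PDU with 400 bytes, and
- * an END PDU with the final 200 bytes; the set is then spliced onto
- * the TX queue in one go so no other SDU can interleave.
- */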
-
-static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
-{
- struct sock *sk = sock->sk;
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- struct sk_buff *skb;
- u16 control;
- int err;
-
- BT_DBG("sock %p, sk %p", sock, sk);
-
- err = sock_error(sk);
- if (err)
- return err;
-
- if (msg->msg_flags & MSG_OOB)
- return -EOPNOTSUPP;
-
- lock_sock(sk);
-
- if (sk->sk_state != BT_CONNECTED) {
- err = -ENOTCONN;
- goto done;
- }
-
- /* Connectionless channel */
- if (sk->sk_type == SOCK_DGRAM) {
- skb = l2cap_create_connless_pdu(sk, msg, len);
- if (IS_ERR(skb)) {
- err = PTR_ERR(skb);
- } else {
- l2cap_do_send(sk, skb);
- err = len;
- }
- goto done;
- }
-
- switch (pi->mode) {
- case L2CAP_MODE_BASIC:
- /* Check outgoing MTU */
- if (len > pi->omtu) {
- err = -EMSGSIZE;
- goto done;
- }
-
- /* Create a basic PDU */
- skb = l2cap_create_basic_pdu(sk, msg, len);
- if (IS_ERR(skb)) {
- err = PTR_ERR(skb);
- goto done;
- }
-
- l2cap_do_send(sk, skb);
- err = len;
- break;
-
- case L2CAP_MODE_ERTM:
- case L2CAP_MODE_STREAMING:
- /* Entire SDU fits into one PDU */
- if (len <= pi->remote_mps) {
- control = L2CAP_SDU_UNSEGMENTED;
- skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
- if (IS_ERR(skb)) {
- err = PTR_ERR(skb);
- goto done;
- }
- __skb_queue_tail(TX_QUEUE(sk), skb);
-
- if (sk->sk_send_head == NULL)
- sk->sk_send_head = skb;
-
- } else {
- /* Segment SDU into multiple PDUs */
- err = l2cap_sar_segment_sdu(sk, msg, len);
- if (err < 0)
- goto done;
- }
-
- if (pi->mode == L2CAP_MODE_STREAMING) {
- l2cap_streaming_send(sk);
- } else {
- if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
- (pi->conn_state & L2CAP_CONN_WAIT_F)) {
- err = len;
- break;
- }
- err = l2cap_ertm_send(sk);
- }
-
- if (err >= 0)
- err = len;
- break;
-
- default:
- BT_DBG("bad state %1.1x", pi->mode);
- err = -EBADFD;
- }
-
-done:
- release_sock(sk);
- return err;
-}
-
-static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
-{
- struct sock *sk = sock->sk;
-
- lock_sock(sk);
-
- if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
- struct l2cap_conn_rsp rsp;
- struct l2cap_conn *conn = l2cap_pi(sk)->conn;
- u8 buf[128];
-
- sk->sk_state = BT_CONFIG;
-
- rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
- rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
- rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
- l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
- L2CAP_CONN_RSP, sizeof(rsp), &rsp);
-
- if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
- release_sock(sk);
- return 0;
- }
-
- l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
- l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(sk, buf), buf);
- l2cap_pi(sk)->num_conf_req++;
-
- release_sock(sk);
- return 0;
- }
-
- release_sock(sk);
-
- if (sock->type == SOCK_STREAM)
- return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
-
- return bt_sock_recvmsg(iocb, sock, msg, len, flags);
-}
-
-static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
-{
- struct sock *sk = sock->sk;
- struct l2cap_options opts;
- int len, err = 0;
- u32 opt;
-
- BT_DBG("sk %p", sk);
-
- lock_sock(sk);
-
- switch (optname) {
- case L2CAP_OPTIONS:
- if (sk->sk_state == BT_CONNECTED) {
- err = -EINVAL;
- break;
- }
-
- opts.imtu = l2cap_pi(sk)->imtu;
- opts.omtu = l2cap_pi(sk)->omtu;
- opts.flush_to = l2cap_pi(sk)->flush_to;
- opts.mode = l2cap_pi(sk)->mode;
- opts.fcs = l2cap_pi(sk)->fcs;
- opts.max_tx = l2cap_pi(sk)->max_tx;
- opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
-
- len = min_t(unsigned int, sizeof(opts), optlen);
- if (copy_from_user((char *) &opts, optval, len)) {
- err = -EFAULT;
- break;
- }
-
- if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
- err = -EINVAL;
- break;
- }
-
- l2cap_pi(sk)->mode = opts.mode;
- switch (l2cap_pi(sk)->mode) {
- case L2CAP_MODE_BASIC:
- l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
- break;
- case L2CAP_MODE_ERTM:
- case L2CAP_MODE_STREAMING:
- if (!disable_ertm)
- break;
- /* fall through */
- default:
- err = -EINVAL;
- break;
- }
-
- l2cap_pi(sk)->imtu = opts.imtu;
- l2cap_pi(sk)->omtu = opts.omtu;
- l2cap_pi(sk)->fcs = opts.fcs;
- l2cap_pi(sk)->max_tx = opts.max_tx;
- l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
- break;
-
- case L2CAP_LM:
- if (get_user(opt, (u32 __user *) optval)) {
- err = -EFAULT;
- break;
- }
-
- if (opt & L2CAP_LM_AUTH)
- l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
- if (opt & L2CAP_LM_ENCRYPT)
- l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
- if (opt & L2CAP_LM_SECURE)
- l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
-
- l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
- l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
- break;
-
- default:
- err = -ENOPROTOOPT;
- break;
- }
-
- release_sock(sk);
- return err;
-}
-
-static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
-{
- struct sock *sk = sock->sk;
- struct bt_security sec;
- int len, err = 0;
- u32 opt;
-
- BT_DBG("sk %p", sk);
-
- if (level == SOL_L2CAP)
- return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
-
- if (level != SOL_BLUETOOTH)
- return -ENOPROTOOPT;
-
- lock_sock(sk);
-
- switch (optname) {
- case BT_SECURITY:
- if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
- && sk->sk_type != SOCK_RAW) {
- err = -EINVAL;
- break;
- }
-
- sec.level = BT_SECURITY_LOW;
-
- len = min_t(unsigned int, sizeof(sec), optlen);
- if (copy_from_user((char *) &sec, optval, len)) {
- err = -EFAULT;
- break;
- }
-
- if (sec.level < BT_SECURITY_LOW ||
- sec.level > BT_SECURITY_HIGH) {
- err = -EINVAL;
- break;
- }
-
- l2cap_pi(sk)->sec_level = sec.level;
- break;
-
- case BT_DEFER_SETUP:
- if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
- err = -EINVAL;
- break;
- }
-
- if (get_user(opt, (u32 __user *) optval)) {
- err = -EFAULT;
- break;
- }
-
- bt_sk(sk)->defer_setup = opt;
- break;
-
- default:
- err = -ENOPROTOOPT;
- break;
- }
-
- release_sock(sk);
- return err;
-}
-
-static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
-{
- struct sock *sk = sock->sk;
- struct l2cap_options opts;
- struct l2cap_conninfo cinfo;
- int len, err = 0;
- u32 opt;
-
- BT_DBG("sk %p", sk);
-
- if (get_user(len, optlen))
- return -EFAULT;
-
- lock_sock(sk);
-
- switch (optname) {
- case L2CAP_OPTIONS:
- opts.imtu = l2cap_pi(sk)->imtu;
- opts.omtu = l2cap_pi(sk)->omtu;
- opts.flush_to = l2cap_pi(sk)->flush_to;
- opts.mode = l2cap_pi(sk)->mode;
- opts.fcs = l2cap_pi(sk)->fcs;
- opts.max_tx = l2cap_pi(sk)->max_tx;
- opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
-
- len = min_t(unsigned int, len, sizeof(opts));
- if (copy_to_user(optval, (char *) &opts, len))
- err = -EFAULT;
-
- break;
-
- case L2CAP_LM:
- switch (l2cap_pi(sk)->sec_level) {
- case BT_SECURITY_LOW:
- opt = L2CAP_LM_AUTH;
- break;
- case BT_SECURITY_MEDIUM:
- opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
- break;
- case BT_SECURITY_HIGH:
- opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
- L2CAP_LM_SECURE;
- break;
- default:
- opt = 0;
- break;
- }
-
- if (l2cap_pi(sk)->role_switch)
- opt |= L2CAP_LM_MASTER;
-
- if (l2cap_pi(sk)->force_reliable)
- opt |= L2CAP_LM_RELIABLE;
-
- if (put_user(opt, (u32 __user *) optval))
- err = -EFAULT;
- break;
-
- case L2CAP_CONNINFO:
- if (sk->sk_state != BT_CONNECTED &&
- !(sk->sk_state == BT_CONNECT2 &&
- bt_sk(sk)->defer_setup)) {
- err = -ENOTCONN;
- break;
- }
-
- cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
- memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
-
- len = min_t(unsigned int, len, sizeof(cinfo));
- if (copy_to_user(optval, (char *) &cinfo, len))
- err = -EFAULT;
-
- break;
-
- default:
- err = -ENOPROTOOPT;
- break;
- }
-
- release_sock(sk);
- return err;
-}
-
-static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
-{
- struct sock *sk = sock->sk;
- struct bt_security sec;
- int len, err = 0;
-
- BT_DBG("sk %p", sk);
-
- if (level == SOL_L2CAP)
- return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
-
- if (level != SOL_BLUETOOTH)
- return -ENOPROTOOPT;
-
- if (get_user(len, optlen))
- return -EFAULT;
-
- lock_sock(sk);
-
- switch (optname) {
- case BT_SECURITY:
- if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
- && sk->sk_type != SOCK_RAW) {
- err = -EINVAL;
- break;
- }
-
- sec.level = l2cap_pi(sk)->sec_level;
-
- len = min_t(unsigned int, len, sizeof(sec));
- if (copy_to_user(optval, (char *) &sec, len))
- err = -EFAULT;
-
- break;
-
- case BT_DEFER_SETUP:
- if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
- err = -EINVAL;
- break;
- }
-
- if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
- err = -EFAULT;
-
- break;
-
- default:
- err = -ENOPROTOOPT;
- break;
- }
-
- release_sock(sk);
- return err;
-}
-
-static int l2cap_sock_shutdown(struct socket *sock, int how)
-{
- struct sock *sk = sock->sk;
- int err = 0;
-
- BT_DBG("sock %p, sk %p", sock, sk);
-
- if (!sk)
- return 0;
-
- lock_sock(sk);
- if (!sk->sk_shutdown) {
- if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
- err = __l2cap_wait_ack(sk);
-
- sk->sk_shutdown = SHUTDOWN_MASK;
- l2cap_sock_clear_timer(sk);
- __l2cap_sock_close(sk, 0);
-
- if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
- err = bt_sock_wait_state(sk, BT_CLOSED,
- sk->sk_lingertime);
- }
-
- if (!err && sk->sk_err)
- err = -sk->sk_err;
-
- release_sock(sk);
- return err;
-}
-
-static int l2cap_sock_release(struct socket *sock)
-{
- struct sock *sk = sock->sk;
- int err;
-
- BT_DBG("sock %p, sk %p", sock, sk);
-
- if (!sk)
- return 0;
-
- err = l2cap_sock_shutdown(sock, 2);
-
- sock_orphan(sk);
- l2cap_sock_kill(sk);
- return err;
-}
-
-static void l2cap_chan_ready(struct sock *sk)
-{
- struct sock *parent = bt_sk(sk)->parent;
-
- BT_DBG("sk %p, parent %p", sk, parent);
-
- l2cap_pi(sk)->conf_state = 0;
- l2cap_sock_clear_timer(sk);
-
- if (!parent) {
- /* Outgoing channel.
- * Wake up socket sleeping on connect.
- */
- sk->sk_state = BT_CONNECTED;
- sk->sk_state_change(sk);
- } else {
- /* Incoming channel.
- * Wake up socket sleeping on accept.
- */
- parent->sk_data_ready(parent, 0);
- }
-}
-
-/* Copy frame to all raw sockets on that connection */
-static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
-{
- struct l2cap_chan_list *l = &conn->chan_list;
- struct sk_buff *nskb;
- struct sock *sk;
-
- BT_DBG("conn %p", conn);
-
- read_lock(&l->lock);
- for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
- if (sk->sk_type != SOCK_RAW)
- continue;
-
- /* Don't send frame to the socket it came from */
- if (skb->sk == sk)
- continue;
- nskb = skb_clone(skb, GFP_ATOMIC);
- if (!nskb)
- continue;
-
- if (sock_queue_rcv_skb(sk, nskb))
- kfree_skb(nskb);
- }
- read_unlock(&l->lock);
-}
-
-/* ---- L2CAP signalling commands ---- */
-static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
- u8 code, u8 ident, u16 dlen, void *data)
-{
- struct sk_buff *skb, **frag;
- struct l2cap_cmd_hdr *cmd;
- struct l2cap_hdr *lh;
- int len, count;
-
- BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
- conn, code, ident, dlen);
-
- len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
- count = min_t(unsigned int, conn->mtu, len);
-
- skb = bt_skb_alloc(count, GFP_ATOMIC);
- if (!skb)
- return NULL;
-
- lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
- lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
- lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
-
- cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
- cmd->code = code;
- cmd->ident = ident;
- cmd->len = cpu_to_le16(dlen);
-
- if (dlen) {
- count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
- memcpy(skb_put(skb, count), data, count);
- data += count;
- }
-
- len -= skb->len;
-
- /* Continuation fragments (no L2CAP header) */
- frag = &skb_shinfo(skb)->frag_list;
- while (len) {
- count = min_t(unsigned int, conn->mtu, len);
-
- *frag = bt_skb_alloc(count, GFP_ATOMIC);
- if (!*frag)
- goto fail;
-
- memcpy(skb_put(*frag, count), data, count);
-
- len -= count;
- data += count;
-
- frag = &(*frag)->next;
- }
-
- return skb;
-
-fail:
- kfree_skb(skb);
- return NULL;
-}
-
-static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
-{
- struct l2cap_conf_opt *opt = *ptr;
- int len;
-
- len = L2CAP_CONF_OPT_SIZE + opt->len;
- *ptr += len;
-
- *type = opt->type;
- *olen = opt->len;
-
- switch (opt->len) {
- case 1:
- *val = *((u8 *) opt->val);
- break;
-
- case 2:
- *val = get_unaligned_le16(opt->val);
- break;
-
- case 4:
- *val = get_unaligned_le32(opt->val);
- break;
-
- default:
- *val = (unsigned long) opt->val;
- break;
- }
-
- BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
- return len;
-}
-
-static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
-{
- struct l2cap_conf_opt *opt = *ptr;
-
- BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
-
- opt->type = type;
- opt->len = len;
-
- switch (len) {
- case 1:
- *((u8 *) opt->val) = val;
- break;
-
- case 2:
- put_unaligned_le16(val, opt->val);
- break;
-
- case 4:
- put_unaligned_le32(val, opt->val);
- break;
-
- default:
- memcpy(opt->val, (void *) val, len);
- break;
- }
-
- *ptr += L2CAP_CONF_OPT_SIZE + len;
-}
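-
-/* Worked example (editorial, not in the original file): emitting an
- * MTU option of 672 bytes,
- *
- * l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, 672);
- *
- * writes the TLV bytes 01 02 a0 02 -- type 0x01, length 2, then the
- * value 0x02a0 in little-endian order -- and advances ptr by
- * L2CAP_CONF_OPT_SIZE + 2.
- */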
-
-static void l2cap_ack_timeout(unsigned long arg)
-{
- struct sock *sk = (void *) arg;
-
- bh_lock_sock(sk);
- l2cap_send_ack(l2cap_pi(sk));
- bh_unlock_sock(sk);
-}
-
-static inline void l2cap_ertm_init(struct sock *sk)
-{
- l2cap_pi(sk)->expected_ack_seq = 0;
- l2cap_pi(sk)->unacked_frames = 0;
- l2cap_pi(sk)->buffer_seq = 0;
- l2cap_pi(sk)->num_acked = 0;
- l2cap_pi(sk)->frames_sent = 0;
-
- setup_timer(&l2cap_pi(sk)->retrans_timer,
- l2cap_retrans_timeout, (unsigned long) sk);
- setup_timer(&l2cap_pi(sk)->monitor_timer,
- l2cap_monitor_timeout, (unsigned long) sk);
- setup_timer(&l2cap_pi(sk)->ack_timer,
- l2cap_ack_timeout, (unsigned long) sk);
-
- __skb_queue_head_init(SREJ_QUEUE(sk));
- __skb_queue_head_init(BUSY_QUEUE(sk));
-
- INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
-
- sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
-}
-
-static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
-{
- switch (mode) {
- case L2CAP_MODE_STREAMING:
- case L2CAP_MODE_ERTM:
- if (l2cap_mode_supported(mode, remote_feat_mask))
- return mode;
- /* fall through */
- default:
- return L2CAP_MODE_BASIC;
- }
-}
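-
-/* Editorial example: a socket asking for L2CAP_MODE_ERTM against a peer
- * whose feature mask lacks L2CAP_FEAT_ERTM (or with ERTM disabled
- * locally via disable_ertm) silently falls back to L2CAP_MODE_BASIC
- * here rather than failing the configuration.
- */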
-
-static int l2cap_build_conf_req(struct sock *sk, void *data)
-{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- struct l2cap_conf_req *req = data;
- struct l2cap_conf_rfc rfc = { .mode = pi->mode };
- void *ptr = req->data;
-
- BT_DBG("sk %p", sk);
-
- if (pi->num_conf_req || pi->num_conf_rsp)
- goto done;
-
- switch (pi->mode) {
- case L2CAP_MODE_STREAMING:
- case L2CAP_MODE_ERTM:
- if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
- break;
-
- /* fall through */
- default:
- pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
- break;
- }
-
-done:
- switch (pi->mode) {
- case L2CAP_MODE_BASIC:
- if (pi->imtu != L2CAP_DEFAULT_MTU)
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
-
- if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
- !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
- break;
-
- rfc.mode = L2CAP_MODE_BASIC;
- rfc.txwin_size = 0;
- rfc.max_transmit = 0;
- rfc.retrans_timeout = 0;
- rfc.monitor_timeout = 0;
- rfc.max_pdu_size = 0;
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
- break;
-
- case L2CAP_MODE_ERTM:
- rfc.mode = L2CAP_MODE_ERTM;
- rfc.txwin_size = pi->tx_win;
- rfc.max_transmit = pi->max_tx;
- rfc.retrans_timeout = 0;
- rfc.monitor_timeout = 0;
- rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
- if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
- rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
-
- if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
- break;
-
- if (pi->fcs == L2CAP_FCS_NONE ||
- pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
- pi->fcs = L2CAP_FCS_NONE;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
- }
- break;
-
- case L2CAP_MODE_STREAMING:
- rfc.mode = L2CAP_MODE_STREAMING;
- rfc.txwin_size = 0;
- rfc.max_transmit = 0;
- rfc.retrans_timeout = 0;
- rfc.monitor_timeout = 0;
- rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
- if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
- rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
-
- if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
- break;
-
- if (pi->fcs == L2CAP_FCS_NONE ||
- pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
- pi->fcs = L2CAP_FCS_NONE;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
- }
- break;
- }
-
- /* FIXME: Need actual value of the flush timeout */
- /* if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
- * l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
- */
-
- req->dcid = cpu_to_le16(pi->dcid);
- req->flags = cpu_to_le16(0);
-
- return ptr - data;
-}
-
-static int l2cap_parse_conf_req(struct sock *sk, void *data)
-{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- struct l2cap_conf_rsp *rsp = data;
- void *ptr = rsp->data;
- void *req = pi->conf_req;
- int len = pi->conf_len;
- int type, hint, olen;
- unsigned long val;
- struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
- u16 mtu = L2CAP_DEFAULT_MTU;
- u16 result = L2CAP_CONF_SUCCESS;
-
- BT_DBG("sk %p", sk);
-
- while (len >= L2CAP_CONF_OPT_SIZE) {
- len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
-
- hint = type & L2CAP_CONF_HINT;
- type &= L2CAP_CONF_MASK;
-
- switch (type) {
- case L2CAP_CONF_MTU:
- mtu = val;
- break;
-
- case L2CAP_CONF_FLUSH_TO:
- pi->flush_to = val;
- break;
-
- case L2CAP_CONF_QOS:
- break;
-
- case L2CAP_CONF_RFC:
- if (olen == sizeof(rfc))
- memcpy(&rfc, (void *) val, olen);
- break;
-
- case L2CAP_CONF_FCS:
- if (val == L2CAP_FCS_NONE)
- pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
-
- break;
-
- default:
- if (hint)
- break;
-
- result = L2CAP_CONF_UNKNOWN;
- *((u8 *) ptr++) = type;
- break;
- }
- }
-
- if (pi->num_conf_rsp || pi->num_conf_req > 1)
- goto done;
-
- switch (pi->mode) {
- case L2CAP_MODE_STREAMING:
- case L2CAP_MODE_ERTM:
- if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
- pi->mode = l2cap_select_mode(rfc.mode,
- pi->conn->feat_mask);
- break;
- }
-
- if (pi->mode != rfc.mode)
- return -ECONNREFUSED;
-
- break;
- }
-
-done:
- if (pi->mode != rfc.mode) {
- result = L2CAP_CONF_UNACCEPT;
- rfc.mode = pi->mode;
-
- if (pi->num_conf_rsp == 1)
- return -ECONNREFUSED;
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
- sizeof(rfc), (unsigned long) &rfc);
- }
-
- if (result == L2CAP_CONF_SUCCESS) {
- /* Configure output options and let the other side know
- * which ones we don't like. */
-
- if (mtu < L2CAP_DEFAULT_MIN_MTU)
- result = L2CAP_CONF_UNACCEPT;
- else {
- pi->omtu = mtu;
- pi->conf_state |= L2CAP_CONF_MTU_DONE;
- }
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
-
- switch (rfc.mode) {
- case L2CAP_MODE_BASIC:
- pi->fcs = L2CAP_FCS_NONE;
- pi->conf_state |= L2CAP_CONF_MODE_DONE;
- break;
-
- case L2CAP_MODE_ERTM:
- pi->remote_tx_win = rfc.txwin_size;
- pi->remote_max_tx = rfc.max_transmit;
-
- if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
- rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
-
- pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
-
- rfc.retrans_timeout =
- cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
- rfc.monitor_timeout =
- cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
-
- pi->conf_state |= L2CAP_CONF_MODE_DONE;
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
- sizeof(rfc), (unsigned long) &rfc);
-
- break;
-
- case L2CAP_MODE_STREAMING:
- if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
- rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
-
- pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
-
- pi->conf_state |= L2CAP_CONF_MODE_DONE;
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
- sizeof(rfc), (unsigned long) &rfc);
-
- break;
-
- default:
- result = L2CAP_CONF_UNACCEPT;
-
- memset(&rfc, 0, sizeof(rfc));
- rfc.mode = pi->mode;
- }
-
- if (result == L2CAP_CONF_SUCCESS)
- pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
- }
- rsp->scid = cpu_to_le16(pi->dcid);
- rsp->result = cpu_to_le16(result);
- rsp->flags = cpu_to_le16(0x0000);
-
- return ptr - data;
-}
-
-static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
-{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- struct l2cap_conf_req *req = data;
- void *ptr = req->data;
- int type, olen;
- unsigned long val;
-	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
-
- BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
-
- while (len >= L2CAP_CONF_OPT_SIZE) {
- len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
-
- switch (type) {
- case L2CAP_CONF_MTU:
- if (val < L2CAP_DEFAULT_MIN_MTU) {
- *result = L2CAP_CONF_UNACCEPT;
- pi->imtu = L2CAP_DEFAULT_MIN_MTU;
- } else
- pi->imtu = val;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
- break;
-
- case L2CAP_CONF_FLUSH_TO:
- pi->flush_to = val;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
- 2, pi->flush_to);
- break;
-
- case L2CAP_CONF_RFC:
- if (olen == sizeof(rfc))
- memcpy(&rfc, (void *)val, olen);
-
- if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
- rfc.mode != pi->mode)
- return -ECONNREFUSED;
-
- pi->fcs = 0;
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
- sizeof(rfc), (unsigned long) &rfc);
- break;
- }
- }
-
- if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
- return -ECONNREFUSED;
-
- pi->mode = rfc.mode;
-
- if (*result == L2CAP_CONF_SUCCESS) {
- switch (rfc.mode) {
- case L2CAP_MODE_ERTM:
- pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
- pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
- pi->mps = le16_to_cpu(rfc.max_pdu_size);
- break;
- case L2CAP_MODE_STREAMING:
- pi->mps = le16_to_cpu(rfc.max_pdu_size);
- }
- }
-
- req->dcid = cpu_to_le16(pi->dcid);
- req->flags = cpu_to_le16(0x0000);
-
- return ptr - data;
-}
-
-static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
-{
- struct l2cap_conf_rsp *rsp = data;
- void *ptr = rsp->data;
-
- BT_DBG("sk %p", sk);
-
- rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
- rsp->result = cpu_to_le16(result);
- rsp->flags = cpu_to_le16(flags);
-
- return ptr - data;
-}
-
-static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
-{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- int type, olen;
- unsigned long val;
-	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
-
- BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
-
- if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
- return;
-
- while (len >= L2CAP_CONF_OPT_SIZE) {
- len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
-
- switch (type) {
- case L2CAP_CONF_RFC:
- if (olen == sizeof(rfc))
- memcpy(&rfc, (void *)val, olen);
- goto done;
- }
- }
-
-done:
- switch (rfc.mode) {
- case L2CAP_MODE_ERTM:
- pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
- pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
- pi->mps = le16_to_cpu(rfc.max_pdu_size);
- break;
- case L2CAP_MODE_STREAMING:
- pi->mps = le16_to_cpu(rfc.max_pdu_size);
- }
-}
-
-static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
-{
- struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
-
- if (rej->reason != 0x0000)
- return 0;
-
- if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
- cmd->ident == conn->info_ident) {
- del_timer(&conn->info_timer);
-
- conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
- conn->info_ident = 0;
-
- l2cap_conn_start(conn);
- }
-
- return 0;
-}
-
-static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
-{
- struct l2cap_chan_list *list = &conn->chan_list;
- struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
- struct l2cap_conn_rsp rsp;
- struct sock *parent, *sk = NULL;
- int result, status = L2CAP_CS_NO_INFO;
-
- u16 dcid = 0, scid = __le16_to_cpu(req->scid);
- __le16 psm = req->psm;
-
- BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
-
- /* Check if we have socket listening on psm */
- parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
- if (!parent) {
- result = L2CAP_CR_BAD_PSM;
- goto sendresp;
- }
-
- bh_lock_sock(parent);
-
- /* Check if the ACL is secure enough (if not SDP) */
- if (psm != cpu_to_le16(0x0001) &&
- !hci_conn_check_link_mode(conn->hcon)) {
- conn->disc_reason = 0x05;
- result = L2CAP_CR_SEC_BLOCK;
- goto response;
- }
-
- result = L2CAP_CR_NO_MEM;
-
- /* Check for backlog size */
- if (sk_acceptq_is_full(parent)) {
- BT_DBG("backlog full %d", parent->sk_ack_backlog);
- goto response;
- }
-
- sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
- if (!sk)
- goto response;
-
- write_lock_bh(&list->lock);
-
- /* Check if we already have channel with that dcid */
- if (__l2cap_get_chan_by_dcid(list, scid)) {
- write_unlock_bh(&list->lock);
- sock_set_flag(sk, SOCK_ZAPPED);
- l2cap_sock_kill(sk);
- goto response;
- }
-
- hci_conn_hold(conn->hcon);
-
- l2cap_sock_init(sk, parent);
- bacpy(&bt_sk(sk)->src, conn->src);
- bacpy(&bt_sk(sk)->dst, conn->dst);
- l2cap_pi(sk)->psm = psm;
- l2cap_pi(sk)->dcid = scid;
-
- __l2cap_chan_add(conn, sk, parent);
- dcid = l2cap_pi(sk)->scid;
-
- l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
-
- l2cap_pi(sk)->ident = cmd->ident;
-
- if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
- if (l2cap_check_security(sk)) {
- if (bt_sk(sk)->defer_setup) {
- sk->sk_state = BT_CONNECT2;
- result = L2CAP_CR_PEND;
- status = L2CAP_CS_AUTHOR_PEND;
- parent->sk_data_ready(parent, 0);
- } else {
- sk->sk_state = BT_CONFIG;
- result = L2CAP_CR_SUCCESS;
- status = L2CAP_CS_NO_INFO;
- }
- } else {
- sk->sk_state = BT_CONNECT2;
- result = L2CAP_CR_PEND;
- status = L2CAP_CS_AUTHEN_PEND;
- }
- } else {
- sk->sk_state = BT_CONNECT2;
- result = L2CAP_CR_PEND;
- status = L2CAP_CS_NO_INFO;
- }
-
- write_unlock_bh(&list->lock);
-
-response:
- bh_unlock_sock(parent);
-
-sendresp:
- rsp.scid = cpu_to_le16(scid);
- rsp.dcid = cpu_to_le16(dcid);
- rsp.result = cpu_to_le16(result);
- rsp.status = cpu_to_le16(status);
- l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
-
- if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
- struct l2cap_info_req info;
- info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
-
- conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
- conn->info_ident = l2cap_get_ident(conn);
-
- mod_timer(&conn->info_timer, jiffies +
- msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
-
- l2cap_send_cmd(conn, conn->info_ident,
- L2CAP_INFO_REQ, sizeof(info), &info);
- }
-
- if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
- result == L2CAP_CR_SUCCESS) {
- u8 buf[128];
- l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
- l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(sk, buf), buf);
- l2cap_pi(sk)->num_conf_req++;
- }
-
- return 0;
-}
-
-static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
-{
- struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
- u16 scid, dcid, result, status;
- struct sock *sk;
- u8 req[128];
-
- scid = __le16_to_cpu(rsp->scid);
- dcid = __le16_to_cpu(rsp->dcid);
- result = __le16_to_cpu(rsp->result);
- status = __le16_to_cpu(rsp->status);
-
- BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
-
- if (scid) {
- sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
- if (!sk)
- return -EFAULT;
- } else {
- sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
- if (!sk)
- return -EFAULT;
- }
-
- switch (result) {
- case L2CAP_CR_SUCCESS:
- sk->sk_state = BT_CONFIG;
- l2cap_pi(sk)->ident = 0;
- l2cap_pi(sk)->dcid = dcid;
- l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
-
- if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
- break;
-
- l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
-
- l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(sk, req), req);
- l2cap_pi(sk)->num_conf_req++;
- break;
-
- case L2CAP_CR_PEND:
- l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
- break;
-
- default:
- /* don't delete l2cap channel if sk is owned by user */
- if (sock_owned_by_user(sk)) {
- sk->sk_state = BT_DISCONN;
- l2cap_sock_clear_timer(sk);
- l2cap_sock_set_timer(sk, HZ / 5);
- break;
- }
-
- l2cap_chan_del(sk, ECONNREFUSED);
- break;
- }
-
- bh_unlock_sock(sk);
- return 0;
-}
-
-static inline void set_default_fcs(struct l2cap_pinfo *pi)
-{
- /* FCS is enabled only in ERTM or streaming mode, if one or both
- * sides request it.
- */
- if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
- pi->fcs = L2CAP_FCS_NONE;
- else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
- pi->fcs = L2CAP_FCS_CRC16;
-}
-
-static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
-{
- struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
- u16 dcid, flags;
- u8 rsp[64];
- struct sock *sk;
- int len;
-
- dcid = __le16_to_cpu(req->dcid);
- flags = __le16_to_cpu(req->flags);
-
- BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
-
- sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
- if (!sk)
- return -ENOENT;
-
- if (sk->sk_state != BT_CONFIG) {
- struct l2cap_cmd_rej rej;
-
- rej.reason = cpu_to_le16(0x0002);
- l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
- sizeof(rej), &rej);
- goto unlock;
- }
-
- /* Reject if config buffer is too small. */
- len = cmd_len - sizeof(*req);
- if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
- l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
- l2cap_build_conf_rsp(sk, rsp,
- L2CAP_CONF_REJECT, flags), rsp);
- goto unlock;
- }
-
- /* Store config. */
- memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
- l2cap_pi(sk)->conf_len += len;
-
- if (flags & 0x0001) {
- /* Incomplete config. Send empty response. */
- l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
- l2cap_build_conf_rsp(sk, rsp,
- L2CAP_CONF_SUCCESS, 0x0001), rsp);
- goto unlock;
- }
-
- /* Complete config. */
- len = l2cap_parse_conf_req(sk, rsp);
- if (len < 0) {
- l2cap_send_disconn_req(conn, sk, ECONNRESET);
- goto unlock;
- }
-
- l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
- l2cap_pi(sk)->num_conf_rsp++;
-
- /* Reset config buffer. */
- l2cap_pi(sk)->conf_len = 0;
-
- if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
- goto unlock;
-
- if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
- set_default_fcs(l2cap_pi(sk));
-
- sk->sk_state = BT_CONNECTED;
-
- l2cap_pi(sk)->next_tx_seq = 0;
- l2cap_pi(sk)->expected_tx_seq = 0;
- __skb_queue_head_init(TX_QUEUE(sk));
- if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
- l2cap_ertm_init(sk);
-
- l2cap_chan_ready(sk);
- goto unlock;
- }
-
- if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
- u8 buf[64];
- l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
- l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(sk, buf), buf);
- l2cap_pi(sk)->num_conf_req++;
- }
-
-unlock:
- bh_unlock_sock(sk);
- return 0;
-}
-
-static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
-{
- struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
- u16 scid, flags, result;
- struct sock *sk;
-	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
-
- scid = __le16_to_cpu(rsp->scid);
- flags = __le16_to_cpu(rsp->flags);
- result = __le16_to_cpu(rsp->result);
-
- BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
- scid, flags, result);
-
- sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
- if (!sk)
- return 0;
-
- switch (result) {
- case L2CAP_CONF_SUCCESS:
- l2cap_conf_rfc_get(sk, rsp->data, len);
- break;
-
- case L2CAP_CONF_UNACCEPT:
- if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
- char req[64];
-
- if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
- l2cap_send_disconn_req(conn, sk, ECONNRESET);
- goto done;
- }
-
- /* throw out any old stored conf requests */
- result = L2CAP_CONF_SUCCESS;
- len = l2cap_parse_conf_rsp(sk, rsp->data,
- len, req, &result);
- if (len < 0) {
- l2cap_send_disconn_req(conn, sk, ECONNRESET);
- goto done;
- }
-
- l2cap_send_cmd(conn, l2cap_get_ident(conn),
- L2CAP_CONF_REQ, len, req);
- l2cap_pi(sk)->num_conf_req++;
- if (result != L2CAP_CONF_SUCCESS)
- goto done;
- break;
- }
-
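-		/* fall through */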
- default:
- sk->sk_err = ECONNRESET;
- l2cap_sock_set_timer(sk, HZ * 5);
- l2cap_send_disconn_req(conn, sk, ECONNRESET);
- goto done;
- }
-
- if (flags & 0x01)
- goto done;
-
- l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
-
- if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
- set_default_fcs(l2cap_pi(sk));
-
- sk->sk_state = BT_CONNECTED;
- l2cap_pi(sk)->next_tx_seq = 0;
- l2cap_pi(sk)->expected_tx_seq = 0;
- __skb_queue_head_init(TX_QUEUE(sk));
- if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
- l2cap_ertm_init(sk);
-
- l2cap_chan_ready(sk);
- }
-
-done:
- bh_unlock_sock(sk);
- return 0;
-}
-
-static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
-{
- struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
- struct l2cap_disconn_rsp rsp;
- u16 dcid, scid;
- struct sock *sk;
-
- scid = __le16_to_cpu(req->scid);
- dcid = __le16_to_cpu(req->dcid);
-
- BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
-
- sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
- if (!sk)
- return 0;
-
- rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
- rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
- l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
-
- sk->sk_shutdown = SHUTDOWN_MASK;
-
- /* don't delete l2cap channel if sk is owned by user */
- if (sock_owned_by_user(sk)) {
- sk->sk_state = BT_DISCONN;
- l2cap_sock_clear_timer(sk);
- l2cap_sock_set_timer(sk, HZ / 5);
- bh_unlock_sock(sk);
- return 0;
- }
-
- l2cap_chan_del(sk, ECONNRESET);
- bh_unlock_sock(sk);
-
- l2cap_sock_kill(sk);
- return 0;
-}
-
-static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
-{
- struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
- u16 dcid, scid;
- struct sock *sk;
-
- scid = __le16_to_cpu(rsp->scid);
- dcid = __le16_to_cpu(rsp->dcid);
-
- BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
-
- sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
- if (!sk)
- return 0;
-
- /* don't delete l2cap channel if sk is owned by user */
- if (sock_owned_by_user(sk)) {
- sk->sk_state = BT_DISCONN;
- l2cap_sock_clear_timer(sk);
- l2cap_sock_set_timer(sk, HZ / 5);
- bh_unlock_sock(sk);
- return 0;
- }
-
- l2cap_chan_del(sk, 0);
- bh_unlock_sock(sk);
-
- l2cap_sock_kill(sk);
- return 0;
-}
-
-static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
-{
- struct l2cap_info_req *req = (struct l2cap_info_req *) data;
- u16 type;
-
- type = __le16_to_cpu(req->type);
-
- BT_DBG("type 0x%4.4x", type);
-
- if (type == L2CAP_IT_FEAT_MASK) {
- u8 buf[8];
- u32 feat_mask = l2cap_feat_mask;
- struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
- rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
- rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
-		if (!disable_ertm)
-			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING |
-				     L2CAP_FEAT_FCS;
- put_unaligned_le32(feat_mask, rsp->data);
- l2cap_send_cmd(conn, cmd->ident,
- L2CAP_INFO_RSP, sizeof(buf), buf);
- } else if (type == L2CAP_IT_FIXED_CHAN) {
- u8 buf[12];
- struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
- rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
- rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
- memcpy(buf + 4, l2cap_fixed_chan, 8);
- l2cap_send_cmd(conn, cmd->ident,
- L2CAP_INFO_RSP, sizeof(buf), buf);
- } else {
- struct l2cap_info_rsp rsp;
- rsp.type = cpu_to_le16(type);
- rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
- l2cap_send_cmd(conn, cmd->ident,
- L2CAP_INFO_RSP, sizeof(rsp), &rsp);
- }
-
- return 0;
-}
-
-static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
-{
- struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
- u16 type, result;
-
- type = __le16_to_cpu(rsp->type);
- result = __le16_to_cpu(rsp->result);
-
- BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
-
- del_timer(&conn->info_timer);
-
- if (result != L2CAP_IR_SUCCESS) {
- conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
- conn->info_ident = 0;
-
- l2cap_conn_start(conn);
-
- return 0;
- }
-
- if (type == L2CAP_IT_FEAT_MASK) {
- conn->feat_mask = get_unaligned_le32(rsp->data);
-
- if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
- struct l2cap_info_req req;
- req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
-
- conn->info_ident = l2cap_get_ident(conn);
-
- l2cap_send_cmd(conn, conn->info_ident,
- L2CAP_INFO_REQ, sizeof(req), &req);
- } else {
- conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
- conn->info_ident = 0;
-
- l2cap_conn_start(conn);
- }
- } else if (type == L2CAP_IT_FIXED_CHAN) {
- conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
- conn->info_ident = 0;
-
- l2cap_conn_start(conn);
- }
-
- return 0;
-}
-
-static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
-{
- u8 *data = skb->data;
- int len = skb->len;
- struct l2cap_cmd_hdr cmd;
- int err = 0;
-
- l2cap_raw_recv(conn, skb);
-
- while (len >= L2CAP_CMD_HDR_SIZE) {
- u16 cmd_len;
- memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
- data += L2CAP_CMD_HDR_SIZE;
- len -= L2CAP_CMD_HDR_SIZE;
-
- cmd_len = le16_to_cpu(cmd.len);
-
- BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
-
- if (cmd_len > len || !cmd.ident) {
- BT_DBG("corrupted command");
- break;
- }
-
- switch (cmd.code) {
- case L2CAP_COMMAND_REJ:
- l2cap_command_rej(conn, &cmd, data);
- break;
-
- case L2CAP_CONN_REQ:
- err = l2cap_connect_req(conn, &cmd, data);
- break;
-
- case L2CAP_CONN_RSP:
- err = l2cap_connect_rsp(conn, &cmd, data);
- break;
-
- case L2CAP_CONF_REQ:
- err = l2cap_config_req(conn, &cmd, cmd_len, data);
- break;
-
- case L2CAP_CONF_RSP:
- err = l2cap_config_rsp(conn, &cmd, data);
- break;
-
- case L2CAP_DISCONN_REQ:
- err = l2cap_disconnect_req(conn, &cmd, data);
- break;
-
- case L2CAP_DISCONN_RSP:
- err = l2cap_disconnect_rsp(conn, &cmd, data);
- break;
-
- case L2CAP_ECHO_REQ:
- l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
- break;
-
- case L2CAP_ECHO_RSP:
- break;
-
- case L2CAP_INFO_REQ:
- err = l2cap_information_req(conn, &cmd, data);
- break;
-
- case L2CAP_INFO_RSP:
- err = l2cap_information_rsp(conn, &cmd, data);
- break;
-
- default:
- BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
- err = -EINVAL;
- break;
- }
-
- if (err) {
- struct l2cap_cmd_rej rej;
- BT_DBG("error %d", err);
-
- /* FIXME: Map err to a valid reason */
- rej.reason = cpu_to_le16(0);
- l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
- }
-
- data += cmd_len;
- len -= cmd_len;
- }
-
- kfree_skb(skb);
-}
-
-static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
-{
- u16 our_fcs, rcv_fcs;
- int hdr_size = L2CAP_HDR_SIZE + 2;
-
- if (pi->fcs == L2CAP_FCS_CRC16) {
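-		/* The CRC16 covers the basic L2CAP header, control field
-		 * and payload; the FCS itself is the trailing two bytes. */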
- skb_trim(skb, skb->len - 2);
- rcv_fcs = get_unaligned_le16(skb->data + skb->len);
- our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
-
- if (our_fcs != rcv_fcs)
- return -EBADMSG;
- }
- return 0;
-}
-
-static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
-{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- u16 control = 0;
-
- pi->frames_sent = 0;
-
- control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-
- if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
- control |= L2CAP_SUPER_RCV_NOT_READY;
- l2cap_send_sframe(pi, control);
- pi->conn_state |= L2CAP_CONN_RNR_SENT;
- }
-
- if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
- l2cap_retransmit_frames(sk);
-
- l2cap_ertm_send(sk);
-
- if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
- pi->frames_sent == 0) {
- control |= L2CAP_SUPER_RCV_READY;
- l2cap_send_sframe(pi, control);
- }
-}
-
-static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
-{
- struct sk_buff *next_skb;
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- int tx_seq_offset, next_tx_seq_offset;
-
- bt_cb(skb)->tx_seq = tx_seq;
- bt_cb(skb)->sar = sar;
-
- next_skb = skb_peek(SREJ_QUEUE(sk));
- if (!next_skb) {
- __skb_queue_tail(SREJ_QUEUE(sk), skb);
- return 0;
- }
-
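-	/* Insert in tx_seq order, using offsets from buffer_seq modulo 64 */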
- tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
- if (tx_seq_offset < 0)
- tx_seq_offset += 64;
-
- do {
- if (bt_cb(next_skb)->tx_seq == tx_seq)
- return -EINVAL;
-
- next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
- pi->buffer_seq) % 64;
- if (next_tx_seq_offset < 0)
- next_tx_seq_offset += 64;
-
- if (next_tx_seq_offset > tx_seq_offset) {
- __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
- return 0;
- }
-
- if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
- break;
-
- } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
-
- __skb_queue_tail(SREJ_QUEUE(sk), skb);
-
- return 0;
-}
-
-static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
-{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- struct sk_buff *_skb;
- int err;
-
- switch (control & L2CAP_CTRL_SAR) {
- case L2CAP_SDU_UNSEGMENTED:
- if (pi->conn_state & L2CAP_CONN_SAR_SDU)
- goto drop;
-
- err = sock_queue_rcv_skb(sk, skb);
- if (!err)
- return err;
-
- break;
-
- case L2CAP_SDU_START:
- if (pi->conn_state & L2CAP_CONN_SAR_SDU)
- goto drop;
-
- pi->sdu_len = get_unaligned_le16(skb->data);
-
- if (pi->sdu_len > pi->imtu)
- goto disconnect;
-
- pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
- if (!pi->sdu)
- return -ENOMEM;
-
-		/* Pull sdu_len bytes only after a successful alloc: under a
-		 * Local Busy condition this code can run again, so the pull
-		 * must happen exactly once, i.e. only when alloc does not fail */
- skb_pull(skb, 2);
-
- memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
-
- pi->conn_state |= L2CAP_CONN_SAR_SDU;
- pi->partial_sdu_len = skb->len;
- break;
-
- case L2CAP_SDU_CONTINUE:
- if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
- goto disconnect;
-
- if (!pi->sdu)
- goto disconnect;
-
- pi->partial_sdu_len += skb->len;
- if (pi->partial_sdu_len > pi->sdu_len)
- goto drop;
-
- memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
-
- break;
-
- case L2CAP_SDU_END:
- if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
- goto disconnect;
-
- if (!pi->sdu)
- goto disconnect;
-
- if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
- pi->partial_sdu_len += skb->len;
-
- if (pi->partial_sdu_len > pi->imtu)
- goto drop;
-
- if (pi->partial_sdu_len != pi->sdu_len)
- goto drop;
-
- memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
- }
-
- _skb = skb_clone(pi->sdu, GFP_ATOMIC);
- if (!_skb) {
- pi->conn_state |= L2CAP_CONN_SAR_RETRY;
- return -ENOMEM;
- }
-
- err = sock_queue_rcv_skb(sk, _skb);
- if (err < 0) {
- kfree_skb(_skb);
- pi->conn_state |= L2CAP_CONN_SAR_RETRY;
- return err;
- }
-
- pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
- pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
-
- kfree_skb(pi->sdu);
- break;
- }
-
- kfree_skb(skb);
- return 0;
-
-drop:
- kfree_skb(pi->sdu);
- pi->sdu = NULL;
-
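-	/* fall through - a malformed SAR SDU also tears down the channel */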
-disconnect:
- l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
- kfree_skb(skb);
- return 0;
-}
-
-static int l2cap_try_push_rx_skb(struct sock *sk)
-{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- struct sk_buff *skb;
- u16 control;
- int err;
-
- while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
- control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
- err = l2cap_ertm_reassembly_sdu(sk, skb, control);
- if (err < 0) {
- skb_queue_head(BUSY_QUEUE(sk), skb);
- return -EBUSY;
- }
-
- pi->buffer_seq = (pi->buffer_seq + 1) % 64;
- }
-
- if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
- goto done;
-
- control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
- control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
- l2cap_send_sframe(pi, control);
- l2cap_pi(sk)->retry_count = 1;
-
- del_timer(&pi->retrans_timer);
- __mod_monitor_timer();
-
- l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
-
-done:
- pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
- pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
-
- BT_DBG("sk %p, Exit local busy", sk);
-
- return 0;
-}
-
-static void l2cap_busy_work(struct work_struct *work)
-{
- DECLARE_WAITQUEUE(wait, current);
- struct l2cap_pinfo *pi =
- container_of(work, struct l2cap_pinfo, busy_work);
- struct sock *sk = (struct sock *)pi;
- int n_tries = 0, timeo = HZ/5, err;
- struct sk_buff *skb;
-
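-	/* Retry pushing the queued frames in HZ/5 slices until the busy
-	 * queue drains or L2CAP_LOCAL_BUSY_TRIES attempts have been made.
-	 */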
- lock_sock(sk);
-
- add_wait_queue(sk_sleep(sk), &wait);
- while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
- set_current_state(TASK_INTERRUPTIBLE);
-
- if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
- err = -EBUSY;
- l2cap_send_disconn_req(pi->conn, sk, EBUSY);
- break;
- }
-
- if (!timeo)
- timeo = HZ/5;
-
- if (signal_pending(current)) {
- err = sock_intr_errno(timeo);
- break;
- }
-
- release_sock(sk);
- timeo = schedule_timeout(timeo);
- lock_sock(sk);
-
- err = sock_error(sk);
- if (err)
- break;
-
- if (l2cap_try_push_rx_skb(sk) == 0)
- break;
- }
-
- set_current_state(TASK_RUNNING);
- remove_wait_queue(sk_sleep(sk), &wait);
-
- release_sock(sk);
-}
-
-static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
-{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- int sctrl, err;
-
- if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
- bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
- __skb_queue_tail(BUSY_QUEUE(sk), skb);
- return l2cap_try_push_rx_skb(sk);
-	}
-
- err = l2cap_ertm_reassembly_sdu(sk, skb, control);
- if (err >= 0) {
- pi->buffer_seq = (pi->buffer_seq + 1) % 64;
- return err;
- }
-
- /* Busy Condition */
- BT_DBG("sk %p, Enter local busy", sk);
-
- pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
- bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
- __skb_queue_tail(BUSY_QUEUE(sk), skb);
-
- sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
- sctrl |= L2CAP_SUPER_RCV_NOT_READY;
- l2cap_send_sframe(pi, sctrl);
-
- pi->conn_state |= L2CAP_CONN_RNR_SENT;
-
- del_timer(&pi->ack_timer);
-
- queue_work(_busy_wq, &pi->busy_work);
-
- return err;
-}
-
-static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
-{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- struct sk_buff *_skb;
- int err = -EINVAL;
-
-	/*
-	 * TODO: Notify user space if data is lost in Streaming Mode.
-	 */
-
- switch (control & L2CAP_CTRL_SAR) {
- case L2CAP_SDU_UNSEGMENTED:
- if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
- kfree_skb(pi->sdu);
- break;
- }
-
- err = sock_queue_rcv_skb(sk, skb);
- if (!err)
- return 0;
-
- break;
-
- case L2CAP_SDU_START:
- if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
- kfree_skb(pi->sdu);
- break;
- }
-
- pi->sdu_len = get_unaligned_le16(skb->data);
- skb_pull(skb, 2);
-
- if (pi->sdu_len > pi->imtu) {
- err = -EMSGSIZE;
- break;
- }
-
- pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
- if (!pi->sdu) {
- err = -ENOMEM;
- break;
- }
-
- memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
-
- pi->conn_state |= L2CAP_CONN_SAR_SDU;
- pi->partial_sdu_len = skb->len;
- err = 0;
- break;
-
- case L2CAP_SDU_CONTINUE:
- if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
- break;
-
- memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
-
- pi->partial_sdu_len += skb->len;
- if (pi->partial_sdu_len > pi->sdu_len)
- kfree_skb(pi->sdu);
- else
- err = 0;
-
- break;
-
- case L2CAP_SDU_END:
- if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
- break;
-
- memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
-
- pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
- pi->partial_sdu_len += skb->len;
-
- if (pi->partial_sdu_len > pi->imtu)
- goto drop;
-
-		if (pi->partial_sdu_len == pi->sdu_len) {
-			_skb = skb_clone(pi->sdu, GFP_ATOMIC);
-			if (_skb) {
-				err = sock_queue_rcv_skb(sk, _skb);
-				if (err < 0)
-					kfree_skb(_skb);
-			}
-		}
- err = 0;
-
-drop:
- kfree_skb(pi->sdu);
- break;
- }
-
- kfree_skb(skb);
- return err;
-}
-
-static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
-{
- struct sk_buff *skb;
- u16 control;
-
- while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
- if (bt_cb(skb)->tx_seq != tx_seq)
- break;
-
- skb = skb_dequeue(SREJ_QUEUE(sk));
- control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
- l2cap_ertm_reassembly_sdu(sk, skb, control);
- l2cap_pi(sk)->buffer_seq_srej =
- (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
- tx_seq = (tx_seq + 1) % 64;
- }
-}
-
-static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
-{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- struct srej_list *l, *tmp;
- u16 control;
-
- list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
- if (l->tx_seq == tx_seq) {
- list_del(&l->list);
- kfree(l);
- return;
- }
- control = L2CAP_SUPER_SELECT_REJECT;
- control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
- l2cap_send_sframe(pi, control);
- list_del(&l->list);
- list_add_tail(&l->list, SREJ_LIST(sk));
- }
-}
-
-static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
-{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- struct srej_list *new;
- u16 control;
-
- while (tx_seq != pi->expected_tx_seq) {
- control = L2CAP_SUPER_SELECT_REJECT;
- control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
- l2cap_send_sframe(pi, control);
-
-		new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
-		if (!new)
-			return;
-		new->tx_seq = pi->expected_tx_seq;
- pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
- list_add_tail(&new->list, SREJ_LIST(sk));
- }
- pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
-}
-
-static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
-{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- u8 tx_seq = __get_txseq(rx_control);
- u8 req_seq = __get_reqseq(rx_control);
- u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
- int tx_seq_offset, expected_tx_seq_offset;
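-	/* Send an acknowledgement after roughly one sixth of the TX window */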
- int num_to_ack = (pi->tx_win/6) + 1;
- int err = 0;
-
- BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
- rx_control);
-
- if (L2CAP_CTRL_FINAL & rx_control &&
- l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
- del_timer(&pi->monitor_timer);
- if (pi->unacked_frames > 0)
- __mod_retrans_timer();
- pi->conn_state &= ~L2CAP_CONN_WAIT_F;
- }
-
- pi->expected_ack_seq = req_seq;
- l2cap_drop_acked_frames(sk);
-
- if (tx_seq == pi->expected_tx_seq)
- goto expected;
-
- tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
- if (tx_seq_offset < 0)
- tx_seq_offset += 64;
-
- /* invalid tx_seq */
- if (tx_seq_offset >= pi->tx_win) {
- l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
- goto drop;
- }
-
-	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
- goto drop;
-
- if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
- struct srej_list *first;
-
- first = list_first_entry(SREJ_LIST(sk),
- struct srej_list, list);
- if (tx_seq == first->tx_seq) {
- l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
- l2cap_check_srej_gap(sk, tx_seq);
-
- list_del(&first->list);
- kfree(first);
-
- if (list_empty(SREJ_LIST(sk))) {
- pi->buffer_seq = pi->buffer_seq_srej;
- pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
- l2cap_send_ack(pi);
- BT_DBG("sk %p, Exit SREJ_SENT", sk);
- }
- } else {
- struct srej_list *l;
-
- /* duplicated tx_seq */
- if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
- goto drop;
-
- list_for_each_entry(l, SREJ_LIST(sk), list) {
- if (l->tx_seq == tx_seq) {
- l2cap_resend_srejframe(sk, tx_seq);
- return 0;
- }
- }
- l2cap_send_srejframe(sk, tx_seq);
- }
- } else {
- expected_tx_seq_offset =
- (pi->expected_tx_seq - pi->buffer_seq) % 64;
- if (expected_tx_seq_offset < 0)
- expected_tx_seq_offset += 64;
-
- /* duplicated tx_seq */
- if (tx_seq_offset < expected_tx_seq_offset)
- goto drop;
-
- pi->conn_state |= L2CAP_CONN_SREJ_SENT;
-
- BT_DBG("sk %p, Enter SREJ", sk);
-
- INIT_LIST_HEAD(SREJ_LIST(sk));
- pi->buffer_seq_srej = pi->buffer_seq;
-
- __skb_queue_head_init(SREJ_QUEUE(sk));
- __skb_queue_head_init(BUSY_QUEUE(sk));
- l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
-
- pi->conn_state |= L2CAP_CONN_SEND_PBIT;
-
- l2cap_send_srejframe(sk, tx_seq);
-
- del_timer(&pi->ack_timer);
- }
- return 0;
-
-expected:
- pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
-
- if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
- bt_cb(skb)->tx_seq = tx_seq;
- bt_cb(skb)->sar = sar;
- __skb_queue_tail(SREJ_QUEUE(sk), skb);
- return 0;
- }
-
- err = l2cap_push_rx_skb(sk, skb, rx_control);
- if (err < 0)
- return 0;
-
- if (rx_control & L2CAP_CTRL_FINAL) {
- if (pi->conn_state & L2CAP_CONN_REJ_ACT)
- pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
- else
- l2cap_retransmit_frames(sk);
- }
-
- __mod_ack_timer();
-
- pi->num_acked = (pi->num_acked + 1) % num_to_ack;
- if (pi->num_acked == num_to_ack - 1)
- l2cap_send_ack(pi);
-
- return 0;
-
-drop:
- kfree_skb(skb);
- return 0;
-}
-
-static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
-{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
-
- BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
- rx_control);
-
- pi->expected_ack_seq = __get_reqseq(rx_control);
- l2cap_drop_acked_frames(sk);
-
- if (rx_control & L2CAP_CTRL_POLL) {
- pi->conn_state |= L2CAP_CONN_SEND_FBIT;
- if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
- if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
- (pi->unacked_frames > 0))
- __mod_retrans_timer();
-
- pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
- l2cap_send_srejtail(sk);
- } else {
- l2cap_send_i_or_rr_or_rnr(sk);
- }
-
- } else if (rx_control & L2CAP_CTRL_FINAL) {
- pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
-
- if (pi->conn_state & L2CAP_CONN_REJ_ACT)
- pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
- else
- l2cap_retransmit_frames(sk);
-
- } else {
- if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
- (pi->unacked_frames > 0))
- __mod_retrans_timer();
-
- pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
- if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
- l2cap_send_ack(pi);
- else
- l2cap_ertm_send(sk);
- }
-}
-
-static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
-{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- u8 tx_seq = __get_reqseq(rx_control);
-
- BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
-
- pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
-
- pi->expected_ack_seq = tx_seq;
- l2cap_drop_acked_frames(sk);
-
- if (rx_control & L2CAP_CTRL_FINAL) {
- if (pi->conn_state & L2CAP_CONN_REJ_ACT)
- pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
- else
- l2cap_retransmit_frames(sk);
- } else {
- l2cap_retransmit_frames(sk);
-
- if (pi->conn_state & L2CAP_CONN_WAIT_F)
- pi->conn_state |= L2CAP_CONN_REJ_ACT;
- }
-}
-
-static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
-{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- u8 tx_seq = __get_reqseq(rx_control);
-
- BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
-
- pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
-
- if (rx_control & L2CAP_CTRL_POLL) {
- pi->expected_ack_seq = tx_seq;
- l2cap_drop_acked_frames(sk);
-
- pi->conn_state |= L2CAP_CONN_SEND_FBIT;
- l2cap_retransmit_one_frame(sk, tx_seq);
-
- l2cap_ertm_send(sk);
-
- if (pi->conn_state & L2CAP_CONN_WAIT_F) {
- pi->srej_save_reqseq = tx_seq;
- pi->conn_state |= L2CAP_CONN_SREJ_ACT;
- }
- } else if (rx_control & L2CAP_CTRL_FINAL) {
- if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
- pi->srej_save_reqseq == tx_seq)
- pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
- else
- l2cap_retransmit_one_frame(sk, tx_seq);
- } else {
- l2cap_retransmit_one_frame(sk, tx_seq);
- if (pi->conn_state & L2CAP_CONN_WAIT_F) {
- pi->srej_save_reqseq = tx_seq;
- pi->conn_state |= L2CAP_CONN_SREJ_ACT;
- }
- }
-}
-
-static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
-{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- u8 tx_seq = __get_reqseq(rx_control);
-
- BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
-
- pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
- pi->expected_ack_seq = tx_seq;
- l2cap_drop_acked_frames(sk);
-
- if (rx_control & L2CAP_CTRL_POLL)
- pi->conn_state |= L2CAP_CONN_SEND_FBIT;
-
- if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
- del_timer(&pi->retrans_timer);
- if (rx_control & L2CAP_CTRL_POLL)
- l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
- return;
- }
-
- if (rx_control & L2CAP_CTRL_POLL)
- l2cap_send_srejtail(sk);
- else
- l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
-}
-
-static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
-{
- BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
-
- if (L2CAP_CTRL_FINAL & rx_control &&
- l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
- del_timer(&l2cap_pi(sk)->monitor_timer);
- if (l2cap_pi(sk)->unacked_frames > 0)
- __mod_retrans_timer();
- l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
- }
-
- switch (rx_control & L2CAP_CTRL_SUPERVISE) {
- case L2CAP_SUPER_RCV_READY:
- l2cap_data_channel_rrframe(sk, rx_control);
- break;
-
- case L2CAP_SUPER_REJECT:
- l2cap_data_channel_rejframe(sk, rx_control);
- break;
-
- case L2CAP_SUPER_SELECT_REJECT:
- l2cap_data_channel_srejframe(sk, rx_control);
- break;
-
- case L2CAP_SUPER_RCV_NOT_READY:
- l2cap_data_channel_rnrframe(sk, rx_control);
- break;
- }
-
- kfree_skb(skb);
- return 0;
-}
-
-static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
-{
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- u16 control;
- u8 req_seq;
- int len, next_tx_seq_offset, req_seq_offset;
-
- control = get_unaligned_le16(skb->data);
- skb_pull(skb, 2);
- len = skb->len;
-
-	/*
-	 * We can just drop the corrupted I-frame here.
-	 * The receiver will detect the missing frame, start the proper
-	 * recovery procedure and request retransmission.
-	 */
- if (l2cap_check_fcs(pi, skb))
- goto drop;
-
- if (__is_sar_start(control) && __is_iframe(control))
- len -= 2;
-
- if (pi->fcs == L2CAP_FCS_CRC16)
- len -= 2;
-
- if (len > pi->mps) {
- l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
- goto drop;
- }
-
- req_seq = __get_reqseq(control);
- req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
- if (req_seq_offset < 0)
- req_seq_offset += 64;
-
- next_tx_seq_offset =
- (pi->next_tx_seq - pi->expected_ack_seq) % 64;
- if (next_tx_seq_offset < 0)
- next_tx_seq_offset += 64;
-
- /* check for invalid req-seq */
- if (req_seq_offset > next_tx_seq_offset) {
- l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
- goto drop;
- }
-
- if (__is_iframe(control)) {
- if (len < 0) {
- l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
- goto drop;
- }
-
- l2cap_data_channel_iframe(sk, control, skb);
- } else {
- if (len != 0) {
-			BT_ERR("Invalid S-frame payload length %d", len);
- l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
- goto drop;
- }
-
- l2cap_data_channel_sframe(sk, control, skb);
- }
-
- return 0;
-
-drop:
- kfree_skb(skb);
- return 0;
-}
-
-static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
-{
- struct sock *sk;
- struct l2cap_pinfo *pi;
- u16 control;
- u8 tx_seq;
- int len;
-
- sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
- if (!sk) {
- BT_DBG("unknown cid 0x%4.4x", cid);
- goto drop;
- }
-
- pi = l2cap_pi(sk);
-
- BT_DBG("sk %p, len %d", sk, skb->len);
-
- if (sk->sk_state != BT_CONNECTED)
- goto drop;
-
- switch (pi->mode) {
- case L2CAP_MODE_BASIC:
-		/* If the socket recv buffer overflows we drop data here,
-		 * which is *bad* because L2CAP has to be reliable.
-		 * But we don't have any other choice: basic mode
-		 * provides no flow control mechanism. */
-
- if (pi->imtu < skb->len)
- goto drop;
-
- if (!sock_queue_rcv_skb(sk, skb))
- goto done;
- break;
-
- case L2CAP_MODE_ERTM:
- if (!sock_owned_by_user(sk)) {
- l2cap_ertm_data_rcv(sk, skb);
- } else {
- if (sk_add_backlog(sk, skb))
- goto drop;
- }
-
- goto done;
-
- case L2CAP_MODE_STREAMING:
- control = get_unaligned_le16(skb->data);
- skb_pull(skb, 2);
- len = skb->len;
-
- if (l2cap_check_fcs(pi, skb))
- goto drop;
-
- if (__is_sar_start(control))
- len -= 2;
-
- if (pi->fcs == L2CAP_FCS_CRC16)
- len -= 2;
-
- if (len > pi->mps || len < 0 || __is_sframe(control))
- goto drop;
-
- tx_seq = __get_txseq(control);
-
- if (pi->expected_tx_seq == tx_seq)
- pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
- else
- pi->expected_tx_seq = (tx_seq + 1) % 64;
-
- l2cap_streaming_reassembly_sdu(sk, skb, control);
-
- goto done;
-
- default:
- BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
- break;
- }
-
-drop:
- kfree_skb(skb);
-
-done:
- if (sk)
- bh_unlock_sock(sk);
-
- return 0;
-}
-
-static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
-{
- struct sock *sk;
-
- sk = l2cap_get_sock_by_psm(0, psm, conn->src);
- if (!sk)
- goto drop;
-
- bh_lock_sock(sk);
-
- BT_DBG("sk %p, len %d", sk, skb->len);
-
- if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
- goto drop;
-
- if (l2cap_pi(sk)->imtu < skb->len)
- goto drop;
-
- if (!sock_queue_rcv_skb(sk, skb))
- goto done;
-
-drop:
- kfree_skb(skb);
-
-done:
- if (sk)
- bh_unlock_sock(sk);
- return 0;
-}
-
-static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
-{
- struct l2cap_hdr *lh = (void *) skb->data;
- u16 cid, len;
- __le16 psm;
-
- skb_pull(skb, L2CAP_HDR_SIZE);
- cid = __le16_to_cpu(lh->cid);
- len = __le16_to_cpu(lh->len);
-
- if (len != skb->len) {
- kfree_skb(skb);
- return;
- }
-
- BT_DBG("len %d, cid 0x%4.4x", len, cid);
-
- switch (cid) {
- case L2CAP_CID_SIGNALING:
- l2cap_sig_channel(conn, skb);
- break;
-
- case L2CAP_CID_CONN_LESS:
- psm = get_unaligned_le16(skb->data);
- skb_pull(skb, 2);
- l2cap_conless_channel(conn, psm, skb);
- break;
-
- default:
- l2cap_data_channel(conn, cid, skb);
- break;
- }
-}
-
-/* ---- L2CAP interface with lower layer (HCI) ---- */
-
-static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
-{
- int exact = 0, lm1 = 0, lm2 = 0;
-	struct sock *sk;
- struct hlist_node *node;
-
- if (type != ACL_LINK)
- return -EINVAL;
-
- BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
-
- /* Find listening sockets and check their link_mode */
- read_lock(&l2cap_sk_list.lock);
- sk_for_each(sk, node, &l2cap_sk_list.head) {
- if (sk->sk_state != BT_LISTEN)
- continue;
-
- if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
- lm1 |= HCI_LM_ACCEPT;
- if (l2cap_pi(sk)->role_switch)
- lm1 |= HCI_LM_MASTER;
- exact++;
- } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
- lm2 |= HCI_LM_ACCEPT;
- if (l2cap_pi(sk)->role_switch)
- lm2 |= HCI_LM_MASTER;
- }
- }
- read_unlock(&l2cap_sk_list.lock);
-
- return exact ? lm1 : lm2;
-}
-
-static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
-{
- struct l2cap_conn *conn;
-
- BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
-
- if (hcon->type != ACL_LINK)
- return -EINVAL;
-
- if (!status) {
- conn = l2cap_conn_add(hcon, status);
- if (conn)
- l2cap_conn_ready(conn);
- } else
- l2cap_conn_del(hcon, bt_err(status));
-
- return 0;
-}
-
-static int l2cap_disconn_ind(struct hci_conn *hcon)
-{
- struct l2cap_conn *conn = hcon->l2cap_data;
-
- BT_DBG("hcon %p", hcon);
-
- if (hcon->type != ACL_LINK || !conn)
- return 0x13;
-
- return conn->disc_reason;
-}
-
-static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
-{
- BT_DBG("hcon %p reason %d", hcon, reason);
-
- if (hcon->type != ACL_LINK)
- return -EINVAL;
-
- l2cap_conn_del(hcon, bt_err(reason));
-
- return 0;
-}
-
-static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
-{
- if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
- return;
-
- if (encrypt == 0x00) {
- if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
- l2cap_sock_clear_timer(sk);
- l2cap_sock_set_timer(sk, HZ * 5);
- } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
- __l2cap_sock_close(sk, ECONNREFUSED);
- } else {
- if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
- l2cap_sock_clear_timer(sk);
- }
-}
-
-static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
-{
- struct l2cap_chan_list *l;
- struct l2cap_conn *conn = hcon->l2cap_data;
- struct sock *sk;
-
- if (!conn)
- return 0;
-
- l = &conn->chan_list;
-
- BT_DBG("conn %p", conn);
-
- read_lock(&l->lock);
-
- for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
- bh_lock_sock(sk);
-
- if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
- bh_unlock_sock(sk);
- continue;
- }
-
- if (!status && (sk->sk_state == BT_CONNECTED ||
- sk->sk_state == BT_CONFIG)) {
- l2cap_check_encryption(sk, encrypt);
- bh_unlock_sock(sk);
- continue;
- }
-
- if (sk->sk_state == BT_CONNECT) {
- if (!status) {
- struct l2cap_conn_req req;
- req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
- req.psm = l2cap_pi(sk)->psm;
-
- l2cap_pi(sk)->ident = l2cap_get_ident(conn);
- l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
-
- l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
- L2CAP_CONN_REQ, sizeof(req), &req);
- } else {
- l2cap_sock_clear_timer(sk);
- l2cap_sock_set_timer(sk, HZ / 10);
- }
- } else if (sk->sk_state == BT_CONNECT2) {
- struct l2cap_conn_rsp rsp;
- __u16 result;
-
- if (!status) {
- sk->sk_state = BT_CONFIG;
- result = L2CAP_CR_SUCCESS;
- } else {
- sk->sk_state = BT_DISCONN;
- l2cap_sock_set_timer(sk, HZ / 10);
- result = L2CAP_CR_SEC_BLOCK;
- }
-
- rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
- rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
- rsp.result = cpu_to_le16(result);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
- l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
- L2CAP_CONN_RSP, sizeof(rsp), &rsp);
- }
-
- bh_unlock_sock(sk);
- }
-
- read_unlock(&l->lock);
-
- return 0;
-}
-
-static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
-{
- struct l2cap_conn *conn = hcon->l2cap_data;
-
- if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
- goto drop;
-
- BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
-
- if (flags & ACL_START) {
- struct l2cap_hdr *hdr;
- struct sock *sk;
- u16 cid;
- int len;
-
- if (conn->rx_len) {
- BT_ERR("Unexpected start frame (len %d)", skb->len);
- kfree_skb(conn->rx_skb);
- conn->rx_skb = NULL;
- conn->rx_len = 0;
- l2cap_conn_unreliable(conn, ECOMM);
- }
-
-		/* A start fragment always begins with the Basic L2CAP header */
- if (skb->len < L2CAP_HDR_SIZE) {
- BT_ERR("Frame is too short (len %d)", skb->len);
- l2cap_conn_unreliable(conn, ECOMM);
- goto drop;
- }
-
- hdr = (struct l2cap_hdr *) skb->data;
- len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
- cid = __le16_to_cpu(hdr->cid);
-
- if (len == skb->len) {
- /* Complete frame received */
- l2cap_recv_frame(conn, skb);
- return 0;
- }
-
- BT_DBG("Start: total len %d, frag len %d", len, skb->len);
-
- if (skb->len > len) {
- BT_ERR("Frame is too long (len %d, expected len %d)",
- skb->len, len);
- l2cap_conn_unreliable(conn, ECOMM);
- goto drop;
- }
-
- sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
-
- if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
- BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
- len, l2cap_pi(sk)->imtu);
- bh_unlock_sock(sk);
- l2cap_conn_unreliable(conn, ECOMM);
- goto drop;
- }
-
- if (sk)
- bh_unlock_sock(sk);
-
- /* Allocate skb for the complete frame (with header) */
- conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
- if (!conn->rx_skb)
- goto drop;
-
- skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
- skb->len);
- conn->rx_len = len - skb->len;
- } else {
- BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
-
- if (!conn->rx_len) {
- BT_ERR("Unexpected continuation frame (len %d)", skb->len);
- l2cap_conn_unreliable(conn, ECOMM);
- goto drop;
- }
-
- if (skb->len > conn->rx_len) {
- BT_ERR("Fragment is too long (len %d, expected %d)",
- skb->len, conn->rx_len);
- kfree_skb(conn->rx_skb);
- conn->rx_skb = NULL;
- conn->rx_len = 0;
- l2cap_conn_unreliable(conn, ECOMM);
- goto drop;
- }
-
- skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
- skb->len);
- conn->rx_len -= skb->len;
-
- if (!conn->rx_len) {
- /* Complete frame received */
- l2cap_recv_frame(conn, conn->rx_skb);
- conn->rx_skb = NULL;
- }
- }
-
-drop:
- kfree_skb(skb);
- return 0;
-}
-
-static int l2cap_debugfs_show(struct seq_file *f, void *p)
-{
- struct sock *sk;
- struct hlist_node *node;
-
- read_lock_bh(&l2cap_sk_list.lock);
-
- sk_for_each(sk, node, &l2cap_sk_list.head) {
- struct l2cap_pinfo *pi = l2cap_pi(sk);
-
- seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
- batostr(&bt_sk(sk)->src),
- batostr(&bt_sk(sk)->dst),
- sk->sk_state, __le16_to_cpu(pi->psm),
- pi->scid, pi->dcid,
- pi->imtu, pi->omtu, pi->sec_level);
- }
-
- read_unlock_bh(&l2cap_sk_list.lock);
-
- return 0;
-}
-
-static int l2cap_debugfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, l2cap_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations l2cap_debugfs_fops = {
- .open = l2cap_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static struct dentry *l2cap_debugfs;
-
-static const struct proto_ops l2cap_sock_ops = {
- .family = PF_BLUETOOTH,
- .owner = THIS_MODULE,
- .release = l2cap_sock_release,
- .bind = l2cap_sock_bind,
- .connect = l2cap_sock_connect,
- .listen = l2cap_sock_listen,
- .accept = l2cap_sock_accept,
- .getname = l2cap_sock_getname,
- .sendmsg = l2cap_sock_sendmsg,
- .recvmsg = l2cap_sock_recvmsg,
- .poll = bt_sock_poll,
- .ioctl = bt_sock_ioctl,
- .mmap = sock_no_mmap,
- .socketpair = sock_no_socketpair,
- .shutdown = l2cap_sock_shutdown,
- .setsockopt = l2cap_sock_setsockopt,
- .getsockopt = l2cap_sock_getsockopt
-};
-
-static const struct net_proto_family l2cap_sock_family_ops = {
- .family = PF_BLUETOOTH,
- .owner = THIS_MODULE,
- .create = l2cap_sock_create,
-};
-
-static struct hci_proto l2cap_hci_proto = {
- .name = "L2CAP",
- .id = HCI_PROTO_L2CAP,
- .connect_ind = l2cap_connect_ind,
- .connect_cfm = l2cap_connect_cfm,
- .disconn_ind = l2cap_disconn_ind,
- .disconn_cfm = l2cap_disconn_cfm,
- .security_cfm = l2cap_security_cfm,
- .recv_acldata = l2cap_recv_acldata
-};
-
-static int __init l2cap_init(void)
-{
- int err;
-
- err = proto_register(&l2cap_proto, 0);
- if (err < 0)
- return err;
-
- _busy_wq = create_singlethread_workqueue("l2cap");
- if (!_busy_wq) {
- proto_unregister(&l2cap_proto);
- return -ENOMEM;
- }
-
- err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
- if (err < 0) {
- BT_ERR("L2CAP socket registration failed");
- goto error;
- }
-
- err = hci_register_proto(&l2cap_hci_proto);
- if (err < 0) {
- BT_ERR("L2CAP protocol registration failed");
- bt_sock_unregister(BTPROTO_L2CAP);
- goto error;
- }
-
- if (bt_debugfs) {
- l2cap_debugfs = debugfs_create_file("l2cap", 0444,
- bt_debugfs, NULL, &l2cap_debugfs_fops);
- if (!l2cap_debugfs)
- BT_ERR("Failed to create L2CAP debug file");
- }
-
- BT_INFO("L2CAP ver %s", VERSION);
- BT_INFO("L2CAP socket layer initialized");
-
- return 0;
-
-error:
- destroy_workqueue(_busy_wq);
- proto_unregister(&l2cap_proto);
- return err;
-}
-
-static void __exit l2cap_exit(void)
-{
- debugfs_remove(l2cap_debugfs);
-
- flush_workqueue(_busy_wq);
- destroy_workqueue(_busy_wq);
-
- if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
- BT_ERR("L2CAP socket unregistration failed");
-
- if (hci_unregister_proto(&l2cap_hci_proto) < 0)
- BT_ERR("L2CAP protocol unregistration failed");
-
- proto_unregister(&l2cap_proto);
-}
-
-void l2cap_load(void)
-{
- /* Dummy function to trigger automatic L2CAP module loading by
- * other modules that use L2CAP sockets but don't use any other
- * symbols from it. */
-}
-EXPORT_SYMBOL(l2cap_load);
-
-module_init(l2cap_init);
-module_exit(l2cap_exit);
-
-module_param(disable_ertm, bool, 0644);
-MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
-
-MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
-MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
-MODULE_VERSION(VERSION);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("bt-proto-0");
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
new file mode 100644
index 00000000000..323f23cd2c3
--- /dev/null
+++ b/net/bluetooth/l2cap_core.c
@@ -0,0 +1,7552 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (C) 2000-2001 Qualcomm Incorporated
+ Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
+ Copyright (C) 2010 Google Inc.
+ Copyright (C) 2011 ProFUSION Embedded Systems
+ Copyright (c) 2012 Code Aurora Forum. All rights reserved.
+
+ Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+/* Bluetooth L2CAP core. */
+
+#include <linux/module.h>
+
+#include <linux/debugfs.h>
+#include <linux/crc16.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/l2cap.h>
+
+#include "smp.h"
+#include "a2mp.h"
+#include "amp.h"
+#include "6lowpan.h"
+
+#define LE_FLOWCTL_MAX_CREDITS 65535
+
+bool disable_ertm;
+
+static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
+static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
+
+static LIST_HEAD(chan_list);
+static DEFINE_RWLOCK(chan_list_lock);
+
+static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
+static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
+
+static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
+ u8 code, u8 ident, u16 dlen, void *data);
+static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
+ void *data);
+static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
+static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
+
+static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+ struct sk_buff_head *skbs, u8 event);
+
+static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
+{
+ if (hcon->type == LE_LINK) {
+ if (type == ADDR_LE_DEV_PUBLIC)
+ return BDADDR_LE_PUBLIC;
+ else
+ return BDADDR_LE_RANDOM;
+ }
+
+ return BDADDR_BREDR;
+}
+
+/* ---- L2CAP channels ---- */
+
+static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
+ u16 cid)
+{
+ struct l2cap_chan *c;
+
+ list_for_each_entry(c, &conn->chan_l, list) {
+ if (c->dcid == cid)
+ return c;
+ }
+ return NULL;
+}
+
+static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
+ u16 cid)
+{
+ struct l2cap_chan *c;
+
+ list_for_each_entry(c, &conn->chan_l, list) {
+ if (c->scid == cid)
+ return c;
+ }
+ return NULL;
+}
+
+/* Find channel with given SCID.
+ * Returns locked channel.
+ */
+static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
+ u16 cid)
+{
+ struct l2cap_chan *c;
+
+ mutex_lock(&conn->chan_lock);
+ c = __l2cap_get_chan_by_scid(conn, cid);
+ if (c)
+ l2cap_chan_lock(c);
+ mutex_unlock(&conn->chan_lock);
+
+ return c;
+}
+
+/* Find channel with given DCID.
+ * Returns locked channel.
+ */
+static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
+ u16 cid)
+{
+ struct l2cap_chan *c;
+
+ mutex_lock(&conn->chan_lock);
+ c = __l2cap_get_chan_by_dcid(conn, cid);
+ if (c)
+ l2cap_chan_lock(c);
+ mutex_unlock(&conn->chan_lock);
+
+ return c;
+}
+
+static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
+ u8 ident)
+{
+ struct l2cap_chan *c;
+
+ list_for_each_entry(c, &conn->chan_l, list) {
+ if (c->ident == ident)
+ return c;
+ }
+ return NULL;
+}
+
+static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
+ u8 ident)
+{
+ struct l2cap_chan *c;
+
+ mutex_lock(&conn->chan_lock);
+ c = __l2cap_get_chan_by_ident(conn, ident);
+ if (c)
+ l2cap_chan_lock(c);
+ mutex_unlock(&conn->chan_lock);
+
+ return c;
+}
+
+static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
+{
+ struct l2cap_chan *c;
+
+ list_for_each_entry(c, &chan_list, global_l) {
+ if (c->sport == psm && !bacmp(&c->src, src))
+ return c;
+ }
+ return NULL;
+}
+
+int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
+{
+ int err;
+
+ write_lock(&chan_list_lock);
+
+ if (psm && __l2cap_global_chan_by_addr(psm, src)) {
+ err = -EADDRINUSE;
+ goto done;
+ }
+
+ if (psm) {
+ chan->psm = psm;
+ chan->sport = psm;
+ err = 0;
+ } else {
+ u16 p;
+
+ err = -EINVAL;
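+		/* Probe the odd PSM values 0x1001..0x10ff for a free slot */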
+ for (p = 0x1001; p < 0x1100; p += 2)
+ if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
+ chan->psm = cpu_to_le16(p);
+ chan->sport = cpu_to_le16(p);
+ err = 0;
+ break;
+ }
+ }
+
+done:
+ write_unlock(&chan_list_lock);
+ return err;
+}
+
+int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
+{
+ write_lock(&chan_list_lock);
+
+ chan->scid = scid;
+
+ write_unlock(&chan_list_lock);
+
+ return 0;
+}
+
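+/* Allocate the first unused source CID from the dynamic range, which
+ * starts at L2CAP_CID_DYN_START (0x0040); LE links use the shorter LE
+ * dynamic range.
+ */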
+static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
+{
+ u16 cid, dyn_end;
+
+ if (conn->hcon->type == LE_LINK)
+ dyn_end = L2CAP_CID_LE_DYN_END;
+ else
+ dyn_end = L2CAP_CID_DYN_END;
+
+ for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
+ if (!__l2cap_get_chan_by_scid(conn, cid))
+ return cid;
+ }
+
+ return 0;
+}
+
+static void l2cap_state_change(struct l2cap_chan *chan, int state)
+{
+ BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
+ state_to_string(state));
+
+ chan->state = state;
+ chan->ops->state_change(chan, state, 0);
+}
+
+static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
+ int state, int err)
+{
+ chan->state = state;
+ chan->ops->state_change(chan, chan->state, err);
+}
+
+static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
+{
+ chan->ops->state_change(chan, chan->state, err);
+}
+
+static void __set_retrans_timer(struct l2cap_chan *chan)
+{
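+ /* Leave the retransmission timer alone while the monitor timer is
+ * pending: in the WAIT_F state the monitor timer drives recovery.
+ */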
+ if (!delayed_work_pending(&chan->monitor_timer) &&
+ chan->retrans_timeout) {
+ l2cap_set_timer(chan, &chan->retrans_timer,
+ msecs_to_jiffies(chan->retrans_timeout));
+ }
+}
+
+static void __set_monitor_timer(struct l2cap_chan *chan)
+{
+ __clear_retrans_timer(chan);
+ if (chan->monitor_timeout) {
+ l2cap_set_timer(chan, &chan->monitor_timer,
+ msecs_to_jiffies(chan->monitor_timeout));
+ }
+}
+
+static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
+ u16 seq)
+{
+ struct sk_buff *skb;
+
+ skb_queue_walk(head, skb) {
+ if (bt_cb(skb)->control.txseq == seq)
+ return skb;
+ }
+
+ return NULL;
+}
+
+/* ---- L2CAP sequence number lists ---- */
+
+/* For ERTM, ordered lists of sequence numbers must be tracked for
+ * SREJ requests that are received and for frames that are to be
+ * retransmitted. These seq_list functions implement a singly-linked
+ * list in an array, where membership in the list can also be checked
+ * in constant time. Items can also be added to the tail of the list
+ * and removed from the head in constant time, without further memory
+ * allocs or frees.
+ */
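+
+/* Example (illustrative): with an allocated size of 8 (mask 0x7),
+ * appending seqs 5, 6 and 10 gives head = 5, tail = 10, list[5] = 6,
+ * list[6] = 10 and list[10 & 0x7] = L2CAP_SEQ_LIST_TAIL, so membership
+ * tests and pops are single array operations.
+ */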
+
+static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
+{
+ size_t alloc_size, i;
+
+ /* Allocated size is a power of 2 to map sequence numbers
+ * (which may be up to 14 bits) into a smaller array that is
+ * sized for the negotiated ERTM transmit window.
+ */
+ alloc_size = roundup_pow_of_two(size);
+
+ seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
+ if (!seq_list->list)
+ return -ENOMEM;
+
+ seq_list->mask = alloc_size - 1;
+ seq_list->head = L2CAP_SEQ_LIST_CLEAR;
+ seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
+ for (i = 0; i < alloc_size; i++)
+ seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
+
+ return 0;
+}
+
+static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
+{
+ kfree(seq_list->list);
+}
+
+static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
+ u16 seq)
+{
+ /* Constant-time check for list membership */
+ return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
+}
+
+static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
+{
+ u16 seq = seq_list->head;
+ u16 mask = seq_list->mask;
+
+ seq_list->head = seq_list->list[seq & mask];
+ seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
+
+ if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
+ seq_list->head = L2CAP_SEQ_LIST_CLEAR;
+ seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
+ }
+
+ return seq;
+}
+
+static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
+{
+ u16 i;
+
+ if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
+ return;
+
+ for (i = 0; i <= seq_list->mask; i++)
+ seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
+
+ seq_list->head = L2CAP_SEQ_LIST_CLEAR;
+ seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
+}
+
+static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
+{
+ u16 mask = seq_list->mask;
+
+ /* All appends happen in constant time */
+
+ if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
+ return;
+
+ if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
+ seq_list->head = seq;
+ else
+ seq_list->list[seq_list->tail & mask] = seq;
+
+ seq_list->tail = seq;
+ seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
+}
+
+static void l2cap_chan_timeout(struct work_struct *work)
+{
+ struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
+ chan_timer.work);
+ struct l2cap_conn *conn = chan->conn;
+ int reason;
+
+ BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
+
+ mutex_lock(&conn->chan_lock);
+ l2cap_chan_lock(chan);
+
+ if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
+ reason = ECONNREFUSED;
+ else if (chan->state == BT_CONNECT &&
+ chan->sec_level != BT_SECURITY_SDP)
+ reason = ECONNREFUSED;
+ else
+ reason = ETIMEDOUT;
+
+ l2cap_chan_close(chan, reason);
+
+ l2cap_chan_unlock(chan);
+
+ chan->ops->close(chan);
+ mutex_unlock(&conn->chan_lock);
+
+ l2cap_chan_put(chan);
+}
+
+struct l2cap_chan *l2cap_chan_create(void)
+{
+ struct l2cap_chan *chan;
+
+ chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
+ if (!chan)
+ return NULL;
+
+ mutex_init(&chan->lock);
+
+ write_lock(&chan_list_lock);
+ list_add(&chan->global_l, &chan_list);
+ write_unlock(&chan_list_lock);
+
+ INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
+
+ chan->state = BT_OPEN;
+
+ kref_init(&chan->kref);
+
+ /* This flag is cleared in l2cap_chan_ready() */
+ set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
+
+ BT_DBG("chan %p", chan);
+
+ return chan;
+}
+
+static void l2cap_chan_destroy(struct kref *kref)
+{
+ struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
+
+ BT_DBG("chan %p", chan);
+
+ write_lock(&chan_list_lock);
+ list_del(&chan->global_l);
+ write_unlock(&chan_list_lock);
+
+ kfree(chan);
+}
+
+void l2cap_chan_hold(struct l2cap_chan *c)
+{
+ BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
+
+ kref_get(&c->kref);
+}
+
+void l2cap_chan_put(struct l2cap_chan *c)
+{
+ BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
+
+ kref_put(&c->kref, l2cap_chan_destroy);
+}
+
+void l2cap_chan_set_defaults(struct l2cap_chan *chan)
+{
+ chan->fcs = L2CAP_FCS_CRC16;
+ chan->max_tx = L2CAP_DEFAULT_MAX_TX;
+ chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
+ chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
+ chan->remote_max_tx = chan->max_tx;
+ chan->remote_tx_win = chan->tx_win;
+ chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
+ chan->sec_level = BT_SECURITY_LOW;
+ chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
+ chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
+ chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
+ chan->conf_state = 0;
+
+ set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+}
+
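+/* LE credit based flow control: rx_credits is the number of credits we
+ * grant to the remote side, while tx_credits starts at zero and is
+ * replenished by credits received from the remote.
+ */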
+static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
+{
+ chan->sdu = NULL;
+ chan->sdu_last_frag = NULL;
+ chan->sdu_len = 0;
+ chan->tx_credits = 0;
+ chan->rx_credits = le_max_credits;
+ chan->mps = min_t(u16, chan->imtu, le_default_mps);
+
+ skb_queue_head_init(&chan->tx_q);
+}
+
+void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+{
+ BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
+ __le16_to_cpu(chan->psm), chan->dcid);
+
+ conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
+
+ chan->conn = conn;
+
+ switch (chan->chan_type) {
+ case L2CAP_CHAN_CONN_ORIENTED:
+ /* Alloc CID for connection-oriented socket */
+ chan->scid = l2cap_alloc_cid(conn);
+ if (conn->hcon->type == ACL_LINK)
+ chan->omtu = L2CAP_DEFAULT_MTU;
+ break;
+
+ case L2CAP_CHAN_CONN_LESS:
+ /* Connectionless socket */
+ chan->scid = L2CAP_CID_CONN_LESS;
+ chan->dcid = L2CAP_CID_CONN_LESS;
+ chan->omtu = L2CAP_DEFAULT_MTU;
+ break;
+
+ case L2CAP_CHAN_FIXED:
+ /* Caller will set CID and CID specific MTU values */
+ break;
+
+ default:
+ /* Raw socket can send/recv signalling messages only */
+ chan->scid = L2CAP_CID_SIGNALING;
+ chan->dcid = L2CAP_CID_SIGNALING;
+ chan->omtu = L2CAP_DEFAULT_MTU;
+ }
+
+ chan->local_id = L2CAP_BESTEFFORT_ID;
+ chan->local_stype = L2CAP_SERV_BESTEFFORT;
+ chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
+ chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
+ chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
+ chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
+
+ l2cap_chan_hold(chan);
+
+ hci_conn_hold(conn->hcon);
+
+ list_add(&chan->list, &conn->chan_l);
+}
+
+void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+{
+ mutex_lock(&conn->chan_lock);
+ __l2cap_chan_add(conn, chan);
+ mutex_unlock(&conn->chan_lock);
+}
+
+void l2cap_chan_del(struct l2cap_chan *chan, int err)
+{
+ struct l2cap_conn *conn = chan->conn;
+
+ __clear_chan_timer(chan);
+
+ BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
+
+ if (conn) {
+ struct amp_mgr *mgr = conn->hcon->amp_mgr;
+ /* Delete from channel list */
+ list_del(&chan->list);
+
+ l2cap_chan_put(chan);
+
+ chan->conn = NULL;
+
+ if (chan->scid != L2CAP_CID_A2MP)
+ hci_conn_drop(conn->hcon);
+
+ if (mgr && mgr->bredr_chan == chan)
+ mgr->bredr_chan = NULL;
+ }
+
+ if (chan->hs_hchan) {
+ struct hci_chan *hs_hchan = chan->hs_hchan;
+
+ BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
+ amp_disconnect_logical_link(hs_hchan);
+ }
+
+ chan->ops->teardown(chan, err);
+
+ if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
+ return;
+
+ switch (chan->mode) {
+ case L2CAP_MODE_BASIC:
+ break;
+
+ case L2CAP_MODE_LE_FLOWCTL:
+ skb_queue_purge(&chan->tx_q);
+ break;
+
+ case L2CAP_MODE_ERTM:
+ __clear_retrans_timer(chan);
+ __clear_monitor_timer(chan);
+ __clear_ack_timer(chan);
+
+ skb_queue_purge(&chan->srej_q);
+
+ l2cap_seq_list_free(&chan->srej_list);
+ l2cap_seq_list_free(&chan->retrans_list);
+
+ /* fall through */
+
+ case L2CAP_MODE_STREAMING:
+ skb_queue_purge(&chan->tx_q);
+ break;
+ }
+}
+
+void l2cap_conn_update_id_addr(struct hci_conn *hcon)
+{
+ struct l2cap_conn *conn = hcon->l2cap_data;
+ struct l2cap_chan *chan;
+
+ mutex_lock(&conn->chan_lock);
+
+ list_for_each_entry(chan, &conn->chan_l, list) {
+ l2cap_chan_lock(chan);
+ bacpy(&chan->dst, &hcon->dst);
+ chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
+ l2cap_chan_unlock(chan);
+ }
+
+ mutex_unlock(&conn->chan_lock);
+}
+
+static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
+{
+ struct l2cap_conn *conn = chan->conn;
+ struct l2cap_le_conn_rsp rsp;
+ u16 result;
+
+ if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
+ result = L2CAP_CR_AUTHORIZATION;
+ else
+ result = L2CAP_CR_BAD_PSM;
+
+ l2cap_state_change(chan, BT_DISCONN);
+
+ rsp.dcid = cpu_to_le16(chan->scid);
+ rsp.mtu = cpu_to_le16(chan->imtu);
+ rsp.mps = cpu_to_le16(chan->mps);
+ rsp.credits = cpu_to_le16(chan->rx_credits);
+ rsp.result = cpu_to_le16(result);
+
+ l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
+ &rsp);
+}
+
+static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
+{
+ struct l2cap_conn *conn = chan->conn;
+ struct l2cap_conn_rsp rsp;
+ u16 result;
+
+ if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
+ result = L2CAP_CR_SEC_BLOCK;
+ else
+ result = L2CAP_CR_BAD_PSM;
+
+ l2cap_state_change(chan, BT_DISCONN);
+
+ rsp.scid = cpu_to_le16(chan->dcid);
+ rsp.dcid = cpu_to_le16(chan->scid);
+ rsp.result = cpu_to_le16(result);
+ rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+
+ l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+}
+
+void l2cap_chan_close(struct l2cap_chan *chan, int reason)
+{
+ struct l2cap_conn *conn = chan->conn;
+
+ BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
+
+ switch (chan->state) {
+ case BT_LISTEN:
+ chan->ops->teardown(chan, 0);
+ break;
+
+ case BT_CONNECTED:
+ case BT_CONFIG:
+ if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
+ __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
+ l2cap_send_disconn_req(chan, reason);
+ } else {
+ l2cap_chan_del(chan, reason);
+ }
+ break;
+
+ case BT_CONNECT2:
+ if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
+ if (conn->hcon->type == ACL_LINK)
+ l2cap_chan_connect_reject(chan);
+ else if (conn->hcon->type == LE_LINK)
+ l2cap_chan_le_connect_reject(chan);
+ }
+
+ l2cap_chan_del(chan, reason);
+ break;
+
+ case BT_CONNECT:
+ case BT_DISCONN:
+ l2cap_chan_del(chan, reason);
+ break;
+
+ default:
+ chan->ops->teardown(chan, 0);
+ break;
+ }
+}
+
+static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
+{
+ switch (chan->chan_type) {
+ case L2CAP_CHAN_RAW:
+ switch (chan->sec_level) {
+ case BT_SECURITY_HIGH:
+ case BT_SECURITY_FIPS:
+ return HCI_AT_DEDICATED_BONDING_MITM;
+ case BT_SECURITY_MEDIUM:
+ return HCI_AT_DEDICATED_BONDING;
+ default:
+ return HCI_AT_NO_BONDING;
+ }
+ break;
+ case L2CAP_CHAN_CONN_LESS:
+ if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
+ if (chan->sec_level == BT_SECURITY_LOW)
+ chan->sec_level = BT_SECURITY_SDP;
+ }
+ if (chan->sec_level == BT_SECURITY_HIGH ||
+ chan->sec_level == BT_SECURITY_FIPS)
+ return HCI_AT_NO_BONDING_MITM;
+ else
+ return HCI_AT_NO_BONDING;
+ break;
+ case L2CAP_CHAN_CONN_ORIENTED:
+ if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
+ if (chan->sec_level == BT_SECURITY_LOW)
+ chan->sec_level = BT_SECURITY_SDP;
+
+ if (chan->sec_level == BT_SECURITY_HIGH ||
+ chan->sec_level == BT_SECURITY_FIPS)
+ return HCI_AT_NO_BONDING_MITM;
+ else
+ return HCI_AT_NO_BONDING;
+ }
+ /* fall through */
+ default:
+ switch (chan->sec_level) {
+ case BT_SECURITY_HIGH:
+ case BT_SECURITY_FIPS:
+ return HCI_AT_GENERAL_BONDING_MITM;
+ case BT_SECURITY_MEDIUM:
+ return HCI_AT_GENERAL_BONDING;
+ default:
+ return HCI_AT_NO_BONDING;
+ }
+ break;
+ }
+}
+
+/* Service level security */
+int l2cap_chan_check_security(struct l2cap_chan *chan)
+{
+ struct l2cap_conn *conn = chan->conn;
+ __u8 auth_type;
+
+ if (conn->hcon->type == LE_LINK)
+ return smp_conn_security(conn->hcon, chan->sec_level);
+
+ auth_type = l2cap_get_auth_type(chan);
+
+ return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
+}
+
+static u8 l2cap_get_ident(struct l2cap_conn *conn)
+{
+ u8 id;
+
+ /* Get next available identifier.
+ * 1 - 128 are used by the kernel.
+ * 129 - 199 are reserved.
+ * 200 - 254 are used by utilities like l2ping, etc.
+ */
+
+ spin_lock(&conn->lock);
+
+ if (++conn->tx_ident > 128)
+ conn->tx_ident = 1;
+
+ id = conn->tx_ident;
+
+ spin_unlock(&conn->lock);
+
+ return id;
+}
+
+static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
+ void *data)
+{
+ struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
+ u8 flags;
+
+ BT_DBG("code 0x%2.2x", code);
+
+ if (!skb)
+ return;
+
+ if (lmp_no_flush_capable(conn->hcon->hdev))
+ flags = ACL_START_NO_FLUSH;
+ else
+ flags = ACL_START;
+
+ bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
+ skb->priority = HCI_PRIO_MAX;
+
+ hci_send_acl(conn->hchan, skb, flags);
+}
+
+static bool __chan_is_moving(struct l2cap_chan *chan)
+{
+ return chan->move_state != L2CAP_MOVE_STABLE &&
+ chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
+}
+
+static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
+{
+ struct hci_conn *hcon = chan->conn->hcon;
+ u16 flags;
+
+ BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
+ skb->priority);
+
+ if (chan->hs_hcon && !__chan_is_moving(chan)) {
+ if (chan->hs_hchan)
+ hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
+ else
+ kfree_skb(skb);
+
+ return;
+ }
+
+ if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
+ lmp_no_flush_capable(hcon->hdev))
+ flags = ACL_START_NO_FLUSH;
+ else
+ flags = ACL_START;
+
+ bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+ hci_send_acl(chan->conn->hchan, skb, flags);
+}
+
+static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
+{
+ control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
+ control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
+
+ if (enh & L2CAP_CTRL_FRAME_TYPE) {
+ /* S-Frame */
+ control->sframe = 1;
+ control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
+ control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
+
+ control->sar = 0;
+ control->txseq = 0;
+ } else {
+ /* I-Frame */
+ control->sframe = 0;
+ control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
+ control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
+
+ control->poll = 0;
+ control->super = 0;
+ }
+}
+
+static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
+{
+ control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
+ control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
+
+ if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
+ /* S-Frame */
+ control->sframe = 1;
+ control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
+ control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
+
+ control->sar = 0;
+ control->txseq = 0;
+ } else {
+ /* I-Frame */
+ control->sframe = 0;
+ control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
+ control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
+
+ control->poll = 0;
+ control->super = 0;
+ }
+}
+
+static inline void __unpack_control(struct l2cap_chan *chan,
+ struct sk_buff *skb)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
+ __unpack_extended_control(get_unaligned_le32(skb->data),
+ &bt_cb(skb)->control);
+ skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
+ } else {
+ __unpack_enhanced_control(get_unaligned_le16(skb->data),
+ &bt_cb(skb)->control);
+ skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
+ }
+}
+
+static u32 __pack_extended_control(struct l2cap_ctrl *control)
+{
+ u32 packed;
+
+ packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
+ packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
+
+ if (control->sframe) {
+ packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
+ packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
+ packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
+ } else {
+ packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
+ packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
+ }
+
+ return packed;
+}
+
+static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
+{
+ u16 packed;
+
+ packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
+ packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
+
+ if (control->sframe) {
+ packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
+ packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
+ packed |= L2CAP_CTRL_FRAME_TYPE;
+ } else {
+ packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
+ packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
+ }
+
+ return packed;
+}
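+
+/* Enhanced control field layout (per the ERTM spec): bit 0 is the
+ * frame type; I-frames use bits 1-6 for TxSeq, S-frames bits 2-3 for
+ * the S function and bit 4 for P; both use bit 7 for F and bits 8-13
+ * for ReqSeq, with bits 14-15 carrying SAR on I-frames. For example,
+ * an RR S-frame with P = 1 and ReqSeq = 5 packs to 0x0511.
+ */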
+
+static inline void __pack_control(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control,
+ struct sk_buff *skb)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
+ put_unaligned_le32(__pack_extended_control(control),
+ skb->data + L2CAP_HDR_SIZE);
+ } else {
+ put_unaligned_le16(__pack_enhanced_control(control),
+ skb->data + L2CAP_HDR_SIZE);
+ }
+}
+
+static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ return L2CAP_EXT_HDR_SIZE;
+ else
+ return L2CAP_ENH_HDR_SIZE;
+}
+
+static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
+ u32 control)
+{
+ struct sk_buff *skb;
+ struct l2cap_hdr *lh;
+ int hlen = __ertm_hdr_size(chan);
+
+ if (chan->fcs == L2CAP_FCS_CRC16)
+ hlen += L2CAP_FCS_SIZE;
+
+ skb = bt_skb_alloc(hlen, GFP_KERNEL);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+ lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
+ lh->cid = cpu_to_le16(chan->dcid);
+
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
+ else
+ put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
+
+ if (chan->fcs == L2CAP_FCS_CRC16) {
+ u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
+ put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
+ }
+
+ skb->priority = HCI_PRIO_MAX;
+ return skb;
+}
+
+static void l2cap_send_sframe(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
+{
+ struct sk_buff *skb;
+ u32 control_field;
+
+ BT_DBG("chan %p, control %p", chan, control);
+
+ if (!control->sframe)
+ return;
+
+ if (__chan_is_moving(chan))
+ return;
+
+ if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
+ !control->poll)
+ control->final = 1;
+
+ if (control->super == L2CAP_SUPER_RR)
+ clear_bit(CONN_RNR_SENT, &chan->conn_state);
+ else if (control->super == L2CAP_SUPER_RNR)
+ set_bit(CONN_RNR_SENT, &chan->conn_state);
+
+ if (control->super != L2CAP_SUPER_SREJ) {
+ chan->last_acked_seq = control->reqseq;
+ __clear_ack_timer(chan);
+ }
+
+ BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
+ control->final, control->poll, control->super);
+
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ control_field = __pack_extended_control(control);
+ else
+ control_field = __pack_enhanced_control(control);
+
+ skb = l2cap_create_sframe_pdu(chan, control_field);
+ if (!IS_ERR(skb))
+ l2cap_do_send(chan, skb);
+}
+
+static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
+{
+ struct l2cap_ctrl control;
+
+ BT_DBG("chan %p, poll %d", chan, poll);
+
+ memset(&control, 0, sizeof(control));
+ control.sframe = 1;
+ control.poll = poll;
+
+ if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
+ control.super = L2CAP_SUPER_RNR;
+ else
+ control.super = L2CAP_SUPER_RR;
+
+ control.reqseq = chan->buffer_seq;
+ l2cap_send_sframe(chan, &control);
+}
+
+static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
+{
+ return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
+}
+
+static bool __amp_capable(struct l2cap_chan *chan)
+{
+ struct l2cap_conn *conn = chan->conn;
+ struct hci_dev *hdev;
+ bool amp_available = false;
+
+ if (!conn->hs_enabled)
+ return false;
+
+ if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
+ return false;
+
+ read_lock(&hci_dev_list_lock);
+ list_for_each_entry(hdev, &hci_dev_list, list) {
+ if (hdev->amp_type != AMP_TYPE_BREDR &&
+ test_bit(HCI_UP, &hdev->flags)) {
+ amp_available = true;
+ break;
+ }
+ }
+ read_unlock(&hci_dev_list_lock);
+
+ if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
+ return amp_available;
+
+ return false;
+}
+
+static bool l2cap_check_efs(struct l2cap_chan *chan)
+{
+ /* Check EFS parameters; placeholder that currently accepts any EFS */
+ return true;
+}
+
+void l2cap_send_conn_req(struct l2cap_chan *chan)
+{
+ struct l2cap_conn *conn = chan->conn;
+ struct l2cap_conn_req req;
+
+ req.scid = cpu_to_le16(chan->scid);
+ req.psm = chan->psm;
+
+ chan->ident = l2cap_get_ident(conn);
+
+ set_bit(CONF_CONNECT_PEND, &chan->conf_state);
+
+ l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
+}
+
+static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
+{
+ struct l2cap_create_chan_req req;
+
+ req.scid = cpu_to_le16(chan->scid);
+ req.psm = chan->psm;
+ req.amp_id = amp_id;
+
+ chan->ident = l2cap_get_ident(chan->conn);
+
+ l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
+ sizeof(req), &req);
+}
+
+static void l2cap_move_setup(struct l2cap_chan *chan)
+{
+ struct sk_buff *skb;
+
+ BT_DBG("chan %p", chan);
+
+ if (chan->mode != L2CAP_MODE_ERTM)
+ return;
+
+ __clear_retrans_timer(chan);
+ __clear_monitor_timer(chan);
+ __clear_ack_timer(chan);
+
+ chan->retry_count = 0;
+ skb_queue_walk(&chan->tx_q, skb) {
+ if (bt_cb(skb)->control.retries)
+ bt_cb(skb)->control.retries = 1;
+ else
+ break;
+ }
+
+ chan->expected_tx_seq = chan->buffer_seq;
+
+ clear_bit(CONN_REJ_ACT, &chan->conn_state);
+ clear_bit(CONN_SREJ_ACT, &chan->conn_state);
+ l2cap_seq_list_clear(&chan->retrans_list);
+ l2cap_seq_list_clear(&chan->srej_list);
+ skb_queue_purge(&chan->srej_q);
+
+ chan->tx_state = L2CAP_TX_STATE_XMIT;
+ chan->rx_state = L2CAP_RX_STATE_MOVE;
+
+ set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
+}
+
+static void l2cap_move_done(struct l2cap_chan *chan)
+{
+ u8 move_role = chan->move_role;
+
+ BT_DBG("chan %p", chan);
+
+ chan->move_state = L2CAP_MOVE_STABLE;
+ chan->move_role = L2CAP_MOVE_ROLE_NONE;
+
+ if (chan->mode != L2CAP_MODE_ERTM)
+ return;
+
+ switch (move_role) {
+ case L2CAP_MOVE_ROLE_INITIATOR:
+ l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
+ chan->rx_state = L2CAP_RX_STATE_WAIT_F;
+ break;
+ case L2CAP_MOVE_ROLE_RESPONDER:
+ chan->rx_state = L2CAP_RX_STATE_WAIT_P;
+ break;
+ }
+}
+
+static void l2cap_chan_ready(struct l2cap_chan *chan)
+{
+ /* This clears all conf flags, including CONF_NOT_COMPLETE */
+ chan->conf_state = 0;
+ __clear_chan_timer(chan);
+
+ if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
+ chan->ops->suspend(chan);
+
+ chan->state = BT_CONNECTED;
+
+ chan->ops->ready(chan);
+}
+
+static void l2cap_le_connect(struct l2cap_chan *chan)
+{
+ struct l2cap_conn *conn = chan->conn;
+ struct l2cap_le_conn_req req;
+
+ if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
+ return;
+
+ req.psm = chan->psm;
+ req.scid = cpu_to_le16(chan->scid);
+ req.mtu = cpu_to_le16(chan->imtu);
+ req.mps = cpu_to_le16(chan->mps);
+ req.credits = cpu_to_le16(chan->rx_credits);
+
+ chan->ident = l2cap_get_ident(conn);
+
+ l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
+ sizeof(req), &req);
+}
+
+static void l2cap_le_start(struct l2cap_chan *chan)
+{
+ struct l2cap_conn *conn = chan->conn;
+
+ if (!smp_conn_security(conn->hcon, chan->sec_level))
+ return;
+
+ if (!chan->psm) {
+ l2cap_chan_ready(chan);
+ return;
+ }
+
+ if (chan->state == BT_CONNECT)
+ l2cap_le_connect(chan);
+}
+
+static void l2cap_start_connection(struct l2cap_chan *chan)
+{
+ if (__amp_capable(chan)) {
+ BT_DBG("chan %p AMP capable: discover AMPs", chan);
+ a2mp_discover_amp(chan);
+ } else if (chan->conn->hcon->type == LE_LINK) {
+ l2cap_le_start(chan);
+ } else {
+ l2cap_send_conn_req(chan);
+ }
+}
+
+static void l2cap_do_start(struct l2cap_chan *chan)
+{
+ struct l2cap_conn *conn = chan->conn;
+
+ if (conn->hcon->type == LE_LINK) {
+ l2cap_le_start(chan);
+ return;
+ }
+
+ if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
+ if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
+ return;
+
+ if (l2cap_chan_check_security(chan) &&
+ __l2cap_no_conn_pending(chan)) {
+ l2cap_start_connection(chan);
+ }
+ } else {
+ struct l2cap_info_req req;
+
+ req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
+
+ conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
+ conn->info_ident = l2cap_get_ident(conn);
+
+ schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
+
+ l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
+ sizeof(req), &req);
+ }
+}
+
+static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
+{
+ u32 local_feat_mask = l2cap_feat_mask;
+
+ if (!disable_ertm)
+ local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
+
+ switch (mode) {
+ case L2CAP_MODE_ERTM:
+ return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
+ case L2CAP_MODE_STREAMING:
+ return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
+ default:
+ return 0x00;
+ }
+}
+
+static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
+{
+ struct l2cap_conn *conn = chan->conn;
+ struct l2cap_disconn_req req;
+
+ if (!conn)
+ return;
+
+ if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
+ __clear_retrans_timer(chan);
+ __clear_monitor_timer(chan);
+ __clear_ack_timer(chan);
+ }
+
+ if (chan->scid == L2CAP_CID_A2MP) {
+ l2cap_state_change(chan, BT_DISCONN);
+ return;
+ }
+
+ req.dcid = cpu_to_le16(chan->dcid);
+ req.scid = cpu_to_le16(chan->scid);
+ l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
+ sizeof(req), &req);
+
+ l2cap_state_change_and_error(chan, BT_DISCONN, err);
+}
+
+/* ---- L2CAP connections ---- */
+static void l2cap_conn_start(struct l2cap_conn *conn)
+{
+ struct l2cap_chan *chan, *tmp;
+
+ BT_DBG("conn %p", conn);
+
+ mutex_lock(&conn->chan_lock);
+
+ list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
+ l2cap_chan_lock(chan);
+
+ if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
+ l2cap_chan_unlock(chan);
+ continue;
+ }
+
+ if (chan->state == BT_CONNECT) {
+ if (!l2cap_chan_check_security(chan) ||
+ !__l2cap_no_conn_pending(chan)) {
+ l2cap_chan_unlock(chan);
+ continue;
+ }
+
+ if (!l2cap_mode_supported(chan->mode, conn->feat_mask) &&
+ test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
+ l2cap_chan_close(chan, ECONNRESET);
+ l2cap_chan_unlock(chan);
+ continue;
+ }
+
+ l2cap_start_connection(chan);
+ } else if (chan->state == BT_CONNECT2) {
+ struct l2cap_conn_rsp rsp;
+ char buf[128];
+
+ rsp.scid = cpu_to_le16(chan->dcid);
+ rsp.dcid = cpu_to_le16(chan->scid);
+
+ if (l2cap_chan_check_security(chan)) {
+ if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
+ rsp.result = cpu_to_le16(L2CAP_CR_PEND);
+ rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
+ chan->ops->defer(chan);
+ } else {
+ l2cap_state_change(chan, BT_CONFIG);
+ rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
+ rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+ }
+ } else {
+ rsp.result = cpu_to_le16(L2CAP_CR_PEND);
+ rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
+ }
+
+ l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
+ sizeof(rsp), &rsp);
+
+ if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
+ rsp.result != L2CAP_CR_SUCCESS) {
+ l2cap_chan_unlock(chan);
+ continue;
+ }
+
+ set_bit(CONF_REQ_SENT, &chan->conf_state);
+ l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+ l2cap_build_conf_req(chan, buf), buf);
+ chan->num_conf_req++;
+ }
+
+ l2cap_chan_unlock(chan);
+ }
+
+ mutex_unlock(&conn->chan_lock);
+}
+
+/* Find channel with given SCID and source/destination bdaddr.
+ * Returns the closest match; the returned channel is not locked.
+ */
+static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
+ bdaddr_t *src,
+ bdaddr_t *dst)
+{
+ struct l2cap_chan *c, *c1 = NULL;
+
+ read_lock(&chan_list_lock);
+
+ list_for_each_entry(c, &chan_list, global_l) {
+ if (state && c->state != state)
+ continue;
+
+ if (c->scid == cid) {
+ int src_match, dst_match;
+ int src_any, dst_any;
+
+ /* Exact match. */
+ src_match = !bacmp(&c->src, src);
+ dst_match = !bacmp(&c->dst, dst);
+ if (src_match && dst_match) {
+ read_unlock(&chan_list_lock);
+ return c;
+ }
+
+ /* Closest match */
+ src_any = !bacmp(&c->src, BDADDR_ANY);
+ dst_any = !bacmp(&c->dst, BDADDR_ANY);
+ if ((src_match && dst_any) || (src_any && dst_match) ||
+ (src_any && dst_any))
+ c1 = c;
+ }
+ }
+
+ read_unlock(&chan_list_lock);
+
+ return c1;
+}
+
+static void l2cap_le_conn_ready(struct l2cap_conn *conn)
+{
+ struct hci_conn *hcon = conn->hcon;
+ struct l2cap_chan *chan, *pchan;
+ u8 dst_type;
+
+ BT_DBG("");
+
+ bt_6lowpan_add_conn(conn);
+
+ /* Check if we have socket listening on cid */
+ pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
+ &hcon->src, &hcon->dst);
+ if (!pchan)
+ return;
+
+ /* Client ATT sockets should override the server one */
+ if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
+ return;
+
+ dst_type = bdaddr_type(hcon, hcon->dst_type);
+
+ /* If device is blocked, do not create a channel for it */
+ if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
+ return;
+
+ l2cap_chan_lock(pchan);
+
+ chan = pchan->ops->new_connection(pchan);
+ if (!chan)
+ goto clean;
+
+ bacpy(&chan->src, &hcon->src);
+ bacpy(&chan->dst, &hcon->dst);
+ chan->src_type = bdaddr_type(hcon, hcon->src_type);
+ chan->dst_type = dst_type;
+
+ __l2cap_chan_add(conn, chan);
+
+clean:
+ l2cap_chan_unlock(pchan);
+}
+
+static void l2cap_conn_ready(struct l2cap_conn *conn)
+{
+ struct l2cap_chan *chan;
+ struct hci_conn *hcon = conn->hcon;
+
+ BT_DBG("conn %p", conn);
+
+ /* For outgoing pairing which doesn't necessarily have an
+ * associated socket (e.g. mgmt_pair_device).
+ */
+ if (hcon->out && hcon->type == LE_LINK)
+ smp_conn_security(hcon, hcon->pending_sec_level);
+
+ mutex_lock(&conn->chan_lock);
+
+ if (hcon->type == LE_LINK)
+ l2cap_le_conn_ready(conn);
+
+ list_for_each_entry(chan, &conn->chan_l, list) {
+ l2cap_chan_lock(chan);
+
+ if (chan->scid == L2CAP_CID_A2MP) {
+ l2cap_chan_unlock(chan);
+ continue;
+ }
+
+ if (hcon->type == LE_LINK) {
+ l2cap_le_start(chan);
+ } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
+ l2cap_chan_ready(chan);
+ } else if (chan->state == BT_CONNECT) {
+ l2cap_do_start(chan);
+ }
+
+ l2cap_chan_unlock(chan);
+ }
+
+ mutex_unlock(&conn->chan_lock);
+
+ queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
+}
+
+/* Notify sockets that we cannot guarantee reliability anymore */
+static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
+{
+ struct l2cap_chan *chan;
+
+ BT_DBG("conn %p", conn);
+
+ mutex_lock(&conn->chan_lock);
+
+ list_for_each_entry(chan, &conn->chan_l, list) {
+ if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
+ l2cap_chan_set_err(chan, err);
+ }
+
+ mutex_unlock(&conn->chan_lock);
+}
+
+static void l2cap_info_timeout(struct work_struct *work)
+{
+ struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
+ info_timer.work);
+
+ conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
+ conn->info_ident = 0;
+
+ l2cap_conn_start(conn);
+}
+
+/*
+ * l2cap_user
+ * External modules can register l2cap_user objects on l2cap_conn. The ->probe
+ * callback is called during registration. The ->remove callback is called
+ * during unregistration.
+ * An l2cap_user object can either be explicitly unregistered or when the
+ * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
+ * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
+ * External modules must own a reference to the l2cap_conn object if they intend
+ * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
+ * any time if they don't.
+ */
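+
+/* A minimal usage sketch (illustrative; my_probe/my_remove are
+ * hypothetical callbacks):
+ *
+ * static struct l2cap_user my_user = {
+ * .probe = my_probe,
+ * .remove = my_remove,
+ * };
+ *
+ * l2cap_conn_get(conn);
+ * err = l2cap_register_user(conn, &my_user);
+ * ...
+ * l2cap_unregister_user(conn, &my_user);
+ * l2cap_conn_put(conn);
+ */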
+
+int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
+{
+ struct hci_dev *hdev = conn->hcon->hdev;
+ int ret;
+
+ /* We need to check whether l2cap_conn is registered. If it is not, we
+ * must not register the l2cap_user. l2cap_conn_del() unregisters
+ * l2cap_conn objects, but doesn't provide its own locking. Instead, it
+ * relies on the parent hci_conn object to be locked. This itself relies
+ * on the hci_dev object to be locked. So we must lock the hci device
+ * here, too.
+ */
+
+ hci_dev_lock(hdev);
+
+ if (user->list.next || user->list.prev) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ /* conn->hchan is NULL after l2cap_conn_del() was called */
+ if (!conn->hchan) {
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+
+ ret = user->probe(conn, user);
+ if (ret)
+ goto out_unlock;
+
+ list_add(&user->list, &conn->users);
+ ret = 0;
+
+out_unlock:
+ hci_dev_unlock(hdev);
+ return ret;
+}
+EXPORT_SYMBOL(l2cap_register_user);
+
+void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
+{
+ struct hci_dev *hdev = conn->hcon->hdev;
+
+ hci_dev_lock(hdev);
+
+ if (!user->list.next || !user->list.prev)
+ goto out_unlock;
+
+ list_del(&user->list);
+ user->list.next = NULL;
+ user->list.prev = NULL;
+ user->remove(conn, user);
+
+out_unlock:
+ hci_dev_unlock(hdev);
+}
+EXPORT_SYMBOL(l2cap_unregister_user);
+
+static void l2cap_unregister_all_users(struct l2cap_conn *conn)
+{
+ struct l2cap_user *user;
+
+ while (!list_empty(&conn->users)) {
+ user = list_first_entry(&conn->users, struct l2cap_user, list);
+ list_del(&user->list);
+ user->list.next = NULL;
+ user->list.prev = NULL;
+ user->remove(conn, user);
+ }
+}
+
+static void l2cap_conn_del(struct hci_conn *hcon, int err)
+{
+ struct l2cap_conn *conn = hcon->l2cap_data;
+ struct l2cap_chan *chan, *l;
+
+ if (!conn)
+ return;
+
+ BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
+
+ kfree_skb(conn->rx_skb);
+
+ skb_queue_purge(&conn->pending_rx);
+
+ /* We cannot call flush_work(&conn->pending_rx_work) here since we
+ * might block if we are running on a worker from the same workqueue
+ * pending_rx_work is waiting on.
+ */
+ if (work_pending(&conn->pending_rx_work))
+ cancel_work_sync(&conn->pending_rx_work);
+
+ l2cap_unregister_all_users(conn);
+
+ mutex_lock(&conn->chan_lock);
+
+ /* Kill channels */
+ list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
+ l2cap_chan_hold(chan);
+ l2cap_chan_lock(chan);
+
+ l2cap_chan_del(chan, err);
+
+ l2cap_chan_unlock(chan);
+
+ chan->ops->close(chan);
+ l2cap_chan_put(chan);
+ }
+
+ mutex_unlock(&conn->chan_lock);
+
+ hci_chan_del(conn->hchan);
+
+ if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
+ cancel_delayed_work_sync(&conn->info_timer);
+
+ if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
+ cancel_delayed_work_sync(&conn->security_timer);
+ smp_chan_destroy(conn);
+ }
+
+ hcon->l2cap_data = NULL;
+ conn->hchan = NULL;
+ l2cap_conn_put(conn);
+}
+
+static void security_timeout(struct work_struct *work)
+{
+ struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
+ security_timer.work);
+
+ BT_DBG("conn %p", conn);
+
+ if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
+ smp_chan_destroy(conn);
+ l2cap_conn_del(conn->hcon, ETIMEDOUT);
+ }
+}
+
+static void l2cap_conn_free(struct kref *ref)
+{
+ struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
+
+ hci_conn_put(conn->hcon);
+ kfree(conn);
+}
+
+void l2cap_conn_get(struct l2cap_conn *conn)
+{
+ kref_get(&conn->ref);
+}
+EXPORT_SYMBOL(l2cap_conn_get);
+
+void l2cap_conn_put(struct l2cap_conn *conn)
+{
+ kref_put(&conn->ref, l2cap_conn_free);
+}
+EXPORT_SYMBOL(l2cap_conn_put);
+
+/* ---- Socket interface ---- */
+
+/* Find channel with given PSM and source/destination bdaddr.
+ * Returns the closest match.
+ */
+static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
+ bdaddr_t *src,
+ bdaddr_t *dst,
+ u8 link_type)
+{
+ struct l2cap_chan *c, *c1 = NULL;
+
+ read_lock(&chan_list_lock);
+
+ list_for_each_entry(c, &chan_list, global_l) {
+ if (state && c->state != state)
+ continue;
+
+ if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
+ continue;
+
+ if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
+ continue;
+
+ if (c->psm == psm) {
+ int src_match, dst_match;
+ int src_any, dst_any;
+
+ /* Exact match. */
+ src_match = !bacmp(&c->src, src);
+ dst_match = !bacmp(&c->dst, dst);
+ if (src_match && dst_match) {
+ read_unlock(&chan_list_lock);
+ return c;
+ }
+
+ /* Closest match */
+ src_any = !bacmp(&c->src, BDADDR_ANY);
+ dst_any = !bacmp(&c->dst, BDADDR_ANY);
+ if ((src_match && dst_any) || (src_any && dst_match) ||
+ (src_any && dst_any))
+ c1 = c;
+ }
+ }
+
+ read_unlock(&chan_list_lock);
+
+ return c1;
+}
+
+static void l2cap_monitor_timeout(struct work_struct *work)
+{
+ struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
+ monitor_timer.work);
+
+ BT_DBG("chan %p", chan);
+
+ l2cap_chan_lock(chan);
+
+ if (!chan->conn) {
+ l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
+ return;
+ }
+
+ l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
+
+ l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
+}
+
+static void l2cap_retrans_timeout(struct work_struct *work)
+{
+ struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
+ retrans_timer.work);
+
+ BT_DBG("chan %p", chan);
+
+ l2cap_chan_lock(chan);
+
+ if (!chan->conn) {
+ l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
+ return;
+ }
+
+ l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
+ l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
+}
+
+static void l2cap_streaming_send(struct l2cap_chan *chan,
+ struct sk_buff_head *skbs)
+{
+ struct sk_buff *skb;
+ struct l2cap_ctrl *control;
+
+ BT_DBG("chan %p, skbs %p", chan, skbs);
+
+ if (__chan_is_moving(chan))
+ return;
+
+ skb_queue_splice_tail_init(skbs, &chan->tx_q);
+
+ while (!skb_queue_empty(&chan->tx_q)) {
+ skb = skb_dequeue(&chan->tx_q);
+
+ bt_cb(skb)->control.retries = 1;
+ control = &bt_cb(skb)->control;
+
+ control->reqseq = 0;
+ control->txseq = chan->next_tx_seq;
+
+ __pack_control(chan, control, skb);
+
+ if (chan->fcs == L2CAP_FCS_CRC16) {
+ u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
+ put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
+ }
+
+ l2cap_do_send(chan, skb);
+
+ BT_DBG("Sent txseq %u", control->txseq);
+
+ chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
+ chan->frames_sent++;
+ }
+}
+
+static int l2cap_ertm_send(struct l2cap_chan *chan)
+{
+ struct sk_buff *skb, *tx_skb;
+ struct l2cap_ctrl *control;
+ int sent = 0;
+
+ BT_DBG("chan %p", chan);
+
+ if (chan->state != BT_CONNECTED)
+ return -ENOTCONN;
+
+ if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
+ return 0;
+
+ if (__chan_is_moving(chan))
+ return 0;
+
+ while (chan->tx_send_head &&
+ chan->unacked_frames < chan->remote_tx_win &&
+ chan->tx_state == L2CAP_TX_STATE_XMIT) {
+ skb = chan->tx_send_head;
+
+ bt_cb(skb)->control.retries = 1;
+ control = &bt_cb(skb)->control;
+
+ if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
+ control->final = 1;
+
+ control->reqseq = chan->buffer_seq;
+ chan->last_acked_seq = chan->buffer_seq;
+ control->txseq = chan->next_tx_seq;
+
+ __pack_control(chan, control, skb);
+
+ if (chan->fcs == L2CAP_FCS_CRC16) {
+ u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
+ put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
+ }
+
+ /* Clone after data has been modified. Data is assumed to be
+ * read-only (for locking purposes) on cloned sk_buffs.
+ */
+ tx_skb = skb_clone(skb, GFP_KERNEL);
+ if (!tx_skb)
+ break;
+
+ __set_retrans_timer(chan);
+
+ chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
+ chan->unacked_frames++;
+ chan->frames_sent++;
+ sent++;
+
+ if (skb_queue_is_last(&chan->tx_q, skb))
+ chan->tx_send_head = NULL;
+ else
+ chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
+
+ l2cap_do_send(chan, tx_skb);
+ BT_DBG("Sent txseq %u", control->txseq);
+ }
+
+ BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
+ chan->unacked_frames, skb_queue_len(&chan->tx_q));
+
+ return sent;
+}
+
+static void l2cap_ertm_resend(struct l2cap_chan *chan)
+{
+ struct l2cap_ctrl control;
+ struct sk_buff *skb;
+ struct sk_buff *tx_skb;
+ u16 seq;
+
+ BT_DBG("chan %p", chan);
+
+ if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
+ return;
+
+ if (__chan_is_moving(chan))
+ return;
+
+ while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
+ seq = l2cap_seq_list_pop(&chan->retrans_list);
+
+ skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
+ if (!skb) {
+ BT_DBG("Error: Can't retransmit seq %d, frame missing",
+ seq);
+ continue;
+ }
+
+ bt_cb(skb)->control.retries++;
+ control = bt_cb(skb)->control;
+
+ if (chan->max_tx != 0 &&
+ bt_cb(skb)->control.retries > chan->max_tx) {
+ BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ l2cap_seq_list_clear(&chan->retrans_list);
+ break;
+ }
+
+ control.reqseq = chan->buffer_seq;
+ if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
+ control.final = 1;
+ else
+ control.final = 0;
+
+ if (skb_cloned(skb)) {
+ /* Cloned sk_buffs are read-only, so we need a
+ * writeable copy
+ */
+ tx_skb = skb_copy(skb, GFP_KERNEL);
+ } else {
+ tx_skb = skb_clone(skb, GFP_KERNEL);
+ }
+
+ if (!tx_skb) {
+ l2cap_seq_list_clear(&chan->retrans_list);
+ break;
+ }
+
+ /* Update skb contents */
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
+ put_unaligned_le32(__pack_extended_control(&control),
+ tx_skb->data + L2CAP_HDR_SIZE);
+ } else {
+ put_unaligned_le16(__pack_enhanced_control(&control),
+ tx_skb->data + L2CAP_HDR_SIZE);
+ }
+
+ if (chan->fcs == L2CAP_FCS_CRC16) {
+ u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
+ put_unaligned_le16(fcs, skb_put(tx_skb,
+ L2CAP_FCS_SIZE));
+ }
+
+ l2cap_do_send(chan, tx_skb);
+
+ BT_DBG("Resent txseq %d", control.txseq);
+
+ chan->last_acked_seq = chan->buffer_seq;
+ }
+}
+
+static void l2cap_retransmit(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
+{
+ BT_DBG("chan %p, control %p", chan, control);
+
+ l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
+ l2cap_ertm_resend(chan);
+}
+
+static void l2cap_retransmit_all(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
+{
+ struct sk_buff *skb;
+
+ BT_DBG("chan %p, control %p", chan, control);
+
+ if (control->poll)
+ set_bit(CONN_SEND_FBIT, &chan->conn_state);
+
+ l2cap_seq_list_clear(&chan->retrans_list);
+
+ if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
+ return;
+
+ if (chan->unacked_frames) {
+ skb_queue_walk(&chan->tx_q, skb) {
+ if (bt_cb(skb)->control.txseq == control->reqseq ||
+ skb == chan->tx_send_head)
+ break;
+ }
+
+ skb_queue_walk_from(&chan->tx_q, skb) {
+ if (skb == chan->tx_send_head)
+ break;
+
+ l2cap_seq_list_append(&chan->retrans_list,
+ bt_cb(skb)->control.txseq);
+ }
+
+ l2cap_ertm_resend(chan);
+ }
+}
+
+static void l2cap_send_ack(struct l2cap_chan *chan)
+{
+ struct l2cap_ctrl control;
+ u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
+ chan->last_acked_seq);
+ int threshold;
+
+ BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
+ chan, chan->last_acked_seq, chan->buffer_seq);
+
+ memset(&control, 0, sizeof(control));
+ control.sframe = 1;
+
+ if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
+ chan->rx_state == L2CAP_RX_STATE_RECV) {
+ __clear_ack_timer(chan);
+ control.super = L2CAP_SUPER_RNR;
+ control.reqseq = chan->buffer_seq;
+ l2cap_send_sframe(chan, &control);
+ } else {
+ if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
+ l2cap_ertm_send(chan);
+ /* If any i-frames were sent, they included an ack */
+ if (chan->buffer_seq == chan->last_acked_seq)
+ frames_to_ack = 0;
+ }
+
+ /* Ack now if the window is 3/4ths full.
+ * Calculate without mul or div
+ */
+ threshold = chan->ack_win;
+ threshold += threshold << 1;
+ threshold >>= 2;
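+ /* i.e. threshold = 3 * ack_win / 4, e.g. ack_win 63 -> threshold 47 */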
+
+ BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
+ threshold);
+
+ if (frames_to_ack >= threshold) {
+ __clear_ack_timer(chan);
+ control.super = L2CAP_SUPER_RR;
+ control.reqseq = chan->buffer_seq;
+ l2cap_send_sframe(chan, &control);
+ frames_to_ack = 0;
+ }
+
+ if (frames_to_ack)
+ __set_ack_timer(chan);
+ }
+}
+
+static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
+ struct msghdr *msg, int len,
+ int count, struct sk_buff *skb)
+{
+ struct l2cap_conn *conn = chan->conn;
+ struct sk_buff **frag;
+ int sent = 0;
+
+ if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
+ return -EFAULT;
+
+ sent += count;
+ len -= count;
+
+ /* Continuation fragments (no L2CAP header) */
+ frag = &skb_shinfo(skb)->frag_list;
+ while (len) {
+ struct sk_buff *tmp;
+
+ count = min_t(unsigned int, conn->mtu, len);
+
+ tmp = chan->ops->alloc_skb(chan, count,
+ msg->msg_flags & MSG_DONTWAIT);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+
+ *frag = tmp;
+
+ if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
+ return -EFAULT;
+
+ (*frag)->priority = skb->priority;
+
+ sent += count;
+ len -= count;
+
+ skb->len += (*frag)->len;
+ skb->data_len += (*frag)->len;
+
+ frag = &(*frag)->next;
+ }
+
+ return sent;
+}
+
+static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
+ struct msghdr *msg, size_t len,
+ u32 priority)
+{
+ struct l2cap_conn *conn = chan->conn;
+ struct sk_buff *skb;
+ int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
+ struct l2cap_hdr *lh;
+
+ BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
+ __le16_to_cpu(chan->psm), len, priority);
+
+ count = min_t(unsigned int, (conn->mtu - hlen), len);
+
+ skb = chan->ops->alloc_skb(chan, count + hlen,
+ msg->msg_flags & MSG_DONTWAIT);
+ if (IS_ERR(skb))
+ return skb;
+
+ skb->priority = priority;
+
+ /* Create L2CAP header */
+ lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+ lh->cid = cpu_to_le16(chan->dcid);
+ lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
+ put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
+
+ err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
+ if (unlikely(err < 0)) {
+ kfree_skb(skb);
+ return ERR_PTR(err);
+ }
+ return skb;
+}
+
+static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
+ struct msghdr *msg, size_t len,
+ u32 priority)
+{
+ struct l2cap_conn *conn = chan->conn;
+ struct sk_buff *skb;
+ int err, count;
+ struct l2cap_hdr *lh;
+
+ BT_DBG("chan %p len %zu", chan, len);
+
+ count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
+
+ skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
+ msg->msg_flags & MSG_DONTWAIT);
+ if (IS_ERR(skb))
+ return skb;
+
+ skb->priority = priority;
+
+ /* Create L2CAP header */
+ lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+ lh->cid = cpu_to_le16(chan->dcid);
+ lh->len = cpu_to_le16(len);
+
+ err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
+ if (unlikely(err < 0)) {
+ kfree_skb(skb);
+ return ERR_PTR(err);
+ }
+ return skb;
+}
+
+static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
+ struct msghdr *msg, size_t len,
+ u16 sdulen)
+{
+ struct l2cap_conn *conn = chan->conn;
+ struct sk_buff *skb;
+ int err, count, hlen;
+ struct l2cap_hdr *lh;
+
+ BT_DBG("chan %p len %zu", chan, len);
+
+ if (!conn)
+ return ERR_PTR(-ENOTCONN);
+
+ hlen = __ertm_hdr_size(chan);
+
+ if (sdulen)
+ hlen += L2CAP_SDULEN_SIZE;
+
+ if (chan->fcs == L2CAP_FCS_CRC16)
+ hlen += L2CAP_FCS_SIZE;
+
+ count = min_t(unsigned int, (conn->mtu - hlen), len);
+
+ skb = chan->ops->alloc_skb(chan, count + hlen,
+ msg->msg_flags & MSG_DONTWAIT);
+ if (IS_ERR(skb))
+ return skb;
+
+ /* Create L2CAP header */
+ lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+ lh->cid = cpu_to_le16(chan->dcid);
+ lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
+
+ /* Control header is populated later */
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
+ else
+ put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
+
+ if (sdulen)
+ put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
+
+ err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
+ if (unlikely(err < 0)) {
+ kfree_skb(skb);
+ return ERR_PTR(err);
+ }
+
+ bt_cb(skb)->control.fcs = chan->fcs;
+ bt_cb(skb)->control.retries = 0;
+ return skb;
+}
+
+static int l2cap_segment_sdu(struct l2cap_chan *chan,
+ struct sk_buff_head *seg_queue,
+ struct msghdr *msg, size_t len)
+{
+ struct sk_buff *skb;
+ u16 sdu_len;
+ size_t pdu_len;
+ u8 sar;
+
+ BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
+
+ /* It is critical that ERTM PDUs fit in a single HCI fragment,
+ * so fragmented skbs are not used. The HCI layer's handling
+ * of fragmented skbs is not compatible with ERTM's queueing.
+ */
+
+ /* PDU size is derived from the HCI MTU */
+ pdu_len = chan->conn->mtu;
+
+ /* Constrain PDU size for BR/EDR connections */
+ if (!chan->hs_hcon)
+ pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
+
+ /* Adjust for largest possible L2CAP overhead. */
+ if (chan->fcs)
+ pdu_len -= L2CAP_FCS_SIZE;
+
+ pdu_len -= __ertm_hdr_size(chan);
+
+ /* Remote device may have requested smaller PDUs */
+ pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
+
+ if (len <= pdu_len) {
+ sar = L2CAP_SAR_UNSEGMENTED;
+ sdu_len = 0;
+ pdu_len = len;
+ } else {
+ sar = L2CAP_SAR_START;
+ sdu_len = len;
+ pdu_len -= L2CAP_SDULEN_SIZE;
+ }
+
+ while (len > 0) {
+ skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
+
+ if (IS_ERR(skb)) {
+ __skb_queue_purge(seg_queue);
+ return PTR_ERR(skb);
+ }
+
+ bt_cb(skb)->control.sar = sar;
+ __skb_queue_tail(seg_queue, skb);
+
+ len -= pdu_len;
+ if (sdu_len) {
+ sdu_len = 0;
+ pdu_len += L2CAP_SDULEN_SIZE;
+ }
+
+ if (len <= pdu_len) {
+ sar = L2CAP_SAR_END;
+ pdu_len = len;
+ } else {
+ sar = L2CAP_SAR_CONTINUE;
+ }
+ }
+
+ return 0;
+}
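+
+/* Segmentation example (illustrative): with an effective pdu_len of
+ * 500 bytes, a 1500 byte SDU becomes four I-frames: SAR_START carrying
+ * the 2-byte SDU length plus 498 data bytes, two SAR_CONTINUE frames
+ * of 500 bytes each, and a SAR_END frame with the final 2 bytes.
+ */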
+
+static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
+ struct msghdr *msg,
+ size_t len, u16 sdulen)
+{
+ struct l2cap_conn *conn = chan->conn;
+ struct sk_buff *skb;
+ int err, count, hlen;
+ struct l2cap_hdr *lh;
+
+ BT_DBG("chan %p len %zu", chan, len);
+
+ if (!conn)
+ return ERR_PTR(-ENOTCONN);
+
+ hlen = L2CAP_HDR_SIZE;
+
+ if (sdulen)
+ hlen += L2CAP_SDULEN_SIZE;
+
+ count = min_t(unsigned int, (conn->mtu - hlen), len);
+
+ skb = chan->ops->alloc_skb(chan, count + hlen,
+ msg->msg_flags & MSG_DONTWAIT);
+ if (IS_ERR(skb))
+ return skb;
+
+ /* Create L2CAP header */
+ lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+ lh->cid = cpu_to_le16(chan->dcid);
+ lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
+
+ if (sdulen)
+ put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
+
+ err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
+ if (unlikely(err < 0)) {
+ kfree_skb(skb);
+ return ERR_PTR(err);
+ }
+
+ return skb;
+}
+
+static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
+ struct sk_buff_head *seg_queue,
+ struct msghdr *msg, size_t len)
+{
+ struct sk_buff *skb;
+ size_t pdu_len;
+ u16 sdu_len;
+
+ BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
+
+ pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;
+
+ pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
+
+ sdu_len = len;
+ pdu_len -= L2CAP_SDULEN_SIZE;
+
+ while (len > 0) {
+ if (len <= pdu_len)
+ pdu_len = len;
+
+ skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
+ if (IS_ERR(skb)) {
+ __skb_queue_purge(seg_queue);
+ return PTR_ERR(skb);
+ }
+
+ __skb_queue_tail(seg_queue, skb);
+
+ len -= pdu_len;
+
+ if (sdu_len) {
+ sdu_len = 0;
+ pdu_len += L2CAP_SDULEN_SIZE;
+ }
+ }
+
+ return 0;
+}
+
+int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
+ u32 priority)
+{
+ struct sk_buff *skb;
+ int err;
+ struct sk_buff_head seg_queue;
+
+ if (!chan->conn)
+ return -ENOTCONN;
+
+ /* Connectionless channel */
+ if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
+ skb = l2cap_create_connless_pdu(chan, msg, len, priority);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ /* Channel lock is released before requesting new skb and then
+ * reacquired, so we need to recheck the channel state.
+ */
+ if (chan->state != BT_CONNECTED) {
+ kfree_skb(skb);
+ return -ENOTCONN;
+ }
+
+ l2cap_do_send(chan, skb);
+ return len;
+ }
+
+ switch (chan->mode) {
+ case L2CAP_MODE_LE_FLOWCTL:
+ /* Check outgoing MTU */
+ if (len > chan->omtu)
+ return -EMSGSIZE;
+
+ if (!chan->tx_credits)
+ return -EAGAIN;
+
+ __skb_queue_head_init(&seg_queue);
+
+ err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
+
+ if (chan->state != BT_CONNECTED) {
+ __skb_queue_purge(&seg_queue);
+ err = -ENOTCONN;
+ }
+
+ if (err)
+ return err;
+
+ skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
+
+ while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
+ l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
+ chan->tx_credits--;
+ }
+
+ if (!chan->tx_credits)
+ chan->ops->suspend(chan);
+
+ err = len;
+
+ break;
+
+ case L2CAP_MODE_BASIC:
+ /* Check outgoing MTU */
+ if (len > chan->omtu)
+ return -EMSGSIZE;
+
+ /* Create a basic PDU */
+ skb = l2cap_create_basic_pdu(chan, msg, len, priority);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ /* Channel lock is released before requesting new skb and then
+ * reacquired, so we need to recheck the channel state.
+ */
+ if (chan->state != BT_CONNECTED) {
+ kfree_skb(skb);
+ return -ENOTCONN;
+ }
+
+ l2cap_do_send(chan, skb);
+ err = len;
+ break;
+
+ case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
+ /* Check outgoing MTU */
+ if (len > chan->omtu) {
+ err = -EMSGSIZE;
+ break;
+ }
+
+ __skb_queue_head_init(&seg_queue);
+
+ /* Do segmentation before calling in to the state machine,
+ * since it's possible to block while waiting for memory
+ * allocation.
+ */
+ err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
+
+ /* The channel could have been closed while segmenting,
+ * check that it is still connected.
+ */
+ if (chan->state != BT_CONNECTED) {
+ __skb_queue_purge(&seg_queue);
+ err = -ENOTCONN;
+ }
+
+ if (err)
+ break;
+
+ if (chan->mode == L2CAP_MODE_ERTM)
+ l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
+ else
+ l2cap_streaming_send(chan, &seg_queue);
+
+ err = len;
+
+ /* If the skbs were not queued for sending, they'll still be in
+ * seg_queue and need to be purged.
+ */
+ __skb_queue_purge(&seg_queue);
+ break;
+
+ default:
+ BT_DBG("bad state %1.1x", chan->mode);
+ err = -EBADFD;
+ }
+
+ return err;
+}
+
+static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
+{
+ struct l2cap_ctrl control;
+ u16 seq;
+
+ BT_DBG("chan %p, txseq %u", chan, txseq);
+
+ memset(&control, 0, sizeof(control));
+ control.sframe = 1;
+ control.super = L2CAP_SUPER_SREJ;
+
+ for (seq = chan->expected_tx_seq; seq != txseq;
+ seq = __next_seq(chan, seq)) {
+ if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
+ control.reqseq = seq;
+ l2cap_send_sframe(chan, &control);
+ l2cap_seq_list_append(&chan->srej_list, seq);
+ }
+ }
+
+ chan->expected_tx_seq = __next_seq(chan, txseq);
+}
+
+static void l2cap_send_srej_tail(struct l2cap_chan *chan)
+{
+ struct l2cap_ctrl control;
+
+ BT_DBG("chan %p", chan);
+
+ if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
+ return;
+
+ memset(&control, 0, sizeof(control));
+ control.sframe = 1;
+ control.super = L2CAP_SUPER_SREJ;
+ control.reqseq = chan->srej_list.tail;
+ l2cap_send_sframe(chan, &control);
+}
+
+static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
+{
+ struct l2cap_ctrl control;
+ u16 initial_head;
+ u16 seq;
+
+ BT_DBG("chan %p, txseq %u", chan, txseq);
+
+ memset(&control, 0, sizeof(control));
+ control.sframe = 1;
+ control.super = L2CAP_SUPER_SREJ;
+
+ /* Capture initial list head to allow only one pass through the list. */
+ initial_head = chan->srej_list.head;
+
+ do {
+ seq = l2cap_seq_list_pop(&chan->srej_list);
+ if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
+ break;
+
+ control.reqseq = seq;
+ l2cap_send_sframe(chan, &control);
+ l2cap_seq_list_append(&chan->srej_list, seq);
+ } while (chan->srej_list.head != initial_head);
+}
+
+static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
+{
+ struct sk_buff *acked_skb;
+ u16 ackseq;
+
+ BT_DBG("chan %p, reqseq %u", chan, reqseq);
+
+ if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
+ return;
+
+ BT_DBG("expected_ack_seq %u, unacked_frames %u",
+ chan->expected_ack_seq, chan->unacked_frames);
+
+ for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
+ ackseq = __next_seq(chan, ackseq)) {
+
+ acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
+ if (acked_skb) {
+ skb_unlink(acked_skb, &chan->tx_q);
+ kfree_skb(acked_skb);
+ chan->unacked_frames--;
+ }
+ }
+
+ chan->expected_ack_seq = reqseq;
+
+ if (chan->unacked_frames == 0)
+ __clear_retrans_timer(chan);
+
+ BT_DBG("unacked_frames %u", chan->unacked_frames);
+}
+
+static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
+{
+ BT_DBG("chan %p", chan);
+
+ chan->expected_tx_seq = chan->buffer_seq;
+ l2cap_seq_list_clear(&chan->srej_list);
+ skb_queue_purge(&chan->srej_q);
+ chan->rx_state = L2CAP_RX_STATE_RECV;
+}
+
+static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control,
+ struct sk_buff_head *skbs, u8 event)
+{
+ BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
+ event);
+
+ switch (event) {
+ case L2CAP_EV_DATA_REQUEST:
+ if (chan->tx_send_head == NULL)
+ chan->tx_send_head = skb_peek(skbs);
+
+ skb_queue_splice_tail_init(skbs, &chan->tx_q);
+ l2cap_ertm_send(chan);
+ break;
+ case L2CAP_EV_LOCAL_BUSY_DETECTED:
+ BT_DBG("Enter LOCAL_BUSY");
+ set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
+
+ if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
+ /* The SREJ_SENT state must be aborted if we are to
+ * enter the LOCAL_BUSY state.
+ */
+ l2cap_abort_rx_srej_sent(chan);
+ }
+
+ l2cap_send_ack(chan);
+
+ break;
+ case L2CAP_EV_LOCAL_BUSY_CLEAR:
+ BT_DBG("Exit LOCAL_BUSY");
+ clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
+
+ if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
+ struct l2cap_ctrl local_control;
+
+ memset(&local_control, 0, sizeof(local_control));
+ local_control.sframe = 1;
+ local_control.super = L2CAP_SUPER_RR;
+ local_control.poll = 1;
+ local_control.reqseq = chan->buffer_seq;
+ l2cap_send_sframe(chan, &local_control);
+
+ chan->retry_count = 1;
+ __set_monitor_timer(chan);
+ chan->tx_state = L2CAP_TX_STATE_WAIT_F;
+ }
+ break;
+ case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
+ l2cap_process_reqseq(chan, control->reqseq);
+ break;
+ case L2CAP_EV_EXPLICIT_POLL:
+ l2cap_send_rr_or_rnr(chan, 1);
+ chan->retry_count = 1;
+ __set_monitor_timer(chan);
+ __clear_ack_timer(chan);
+ chan->tx_state = L2CAP_TX_STATE_WAIT_F;
+ break;
+ case L2CAP_EV_RETRANS_TO:
+ l2cap_send_rr_or_rnr(chan, 1);
+ chan->retry_count = 1;
+ __set_monitor_timer(chan);
+ chan->tx_state = L2CAP_TX_STATE_WAIT_F;
+ break;
+ case L2CAP_EV_RECV_FBIT:
+ /* Nothing to process */
+ break;
+ default:
+ break;
+ }
+}
+
+static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control,
+ struct sk_buff_head *skbs, u8 event)
+{
+ BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
+ event);
+
+ switch (event) {
+ case L2CAP_EV_DATA_REQUEST:
+ if (chan->tx_send_head == NULL)
+ chan->tx_send_head = skb_peek(skbs);
+ /* Queue data, but don't send. */
+ skb_queue_splice_tail_init(skbs, &chan->tx_q);
+ break;
+ case L2CAP_EV_LOCAL_BUSY_DETECTED:
+ BT_DBG("Enter LOCAL_BUSY");
+ set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
+
+ if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
+ /* The SREJ_SENT state must be aborted if we are to
+ * enter the LOCAL_BUSY state.
+ */
+ l2cap_abort_rx_srej_sent(chan);
+ }
+
+ l2cap_send_ack(chan);
+
+ break;
+ case L2CAP_EV_LOCAL_BUSY_CLEAR:
+ BT_DBG("Exit LOCAL_BUSY");
+ clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
+
+ if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
+ struct l2cap_ctrl local_control;
+ memset(&local_control, 0, sizeof(local_control));
+ local_control.sframe = 1;
+ local_control.super = L2CAP_SUPER_RR;
+ local_control.poll = 1;
+ local_control.reqseq = chan->buffer_seq;
+ l2cap_send_sframe(chan, &local_control);
+
+ chan->retry_count = 1;
+ __set_monitor_timer(chan);
+ chan->tx_state = L2CAP_TX_STATE_WAIT_F;
+ }
+ break;
+ case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
+ l2cap_process_reqseq(chan, control->reqseq);
+
+ /* Fall through */
+
+ case L2CAP_EV_RECV_FBIT:
+ if (control && control->final) {
+ __clear_monitor_timer(chan);
+ if (chan->unacked_frames > 0)
+ __set_retrans_timer(chan);
+ chan->retry_count = 0;
+ chan->tx_state = L2CAP_TX_STATE_XMIT;
+ BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
+ }
+ break;
+ case L2CAP_EV_EXPLICIT_POLL:
+ /* Ignore */
+ break;
+ case L2CAP_EV_MONITOR_TO:
+ if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
+ l2cap_send_rr_or_rnr(chan, 1);
+ __set_monitor_timer(chan);
+ chan->retry_count++;
+ } else {
+ l2cap_send_disconn_req(chan, ECONNABORTED);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+ struct sk_buff_head *skbs, u8 event)
+{
+ BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
+ chan, control, skbs, event, chan->tx_state);
+
+ switch (chan->tx_state) {
+ case L2CAP_TX_STATE_XMIT:
+ l2cap_tx_state_xmit(chan, control, skbs, event);
+ break;
+ case L2CAP_TX_STATE_WAIT_F:
+ l2cap_tx_state_wait_f(chan, control, skbs, event);
+ break;
+ default:
+ /* Ignore event */
+ break;
+ }
+}
+
+static void l2cap_pass_to_tx(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
+{
+ BT_DBG("chan %p, control %p", chan, control);
+ l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
+}
+
+static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
+{
+ BT_DBG("chan %p, control %p", chan, control);
+ l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
+}
+
+/* Copy frame to all raw sockets on that connection */
+static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+ struct sk_buff *nskb;
+ struct l2cap_chan *chan;
+
+ BT_DBG("conn %p", conn);
+
+ mutex_lock(&conn->chan_lock);
+
+ list_for_each_entry(chan, &conn->chan_l, list) {
+ if (chan->chan_type != L2CAP_CHAN_RAW)
+ continue;
+
+ /* Don't send frame to the channel it came from */
+ if (bt_cb(skb)->chan == chan)
+ continue;
+
+ nskb = skb_clone(skb, GFP_KERNEL);
+ if (!nskb)
+ continue;
+ if (chan->ops->recv(chan, nskb))
+ kfree_skb(nskb);
+ }
+
+ mutex_unlock(&conn->chan_lock);
+}
+
+/* ---- L2CAP signalling commands ---- */
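+/* Build a signalling command PDU. Commands larger than the signalling
+ * MTU are split into continuation fragments chained on the skb's
+ * frag_list; only the first fragment carries the L2CAP and command
+ * headers.
+ */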
+static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
+ u8 ident, u16 dlen, void *data)
+{
+ struct sk_buff *skb, **frag;
+ struct l2cap_cmd_hdr *cmd;
+ struct l2cap_hdr *lh;
+ int len, count;
+
+ BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
+ conn, code, ident, dlen);
+
+ if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
+ return NULL;
+
+ len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
+ count = min_t(unsigned int, conn->mtu, len);
+
+ skb = bt_skb_alloc(count, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+ lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
+
+ if (conn->hcon->type == LE_LINK)
+ lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
+ else
+ lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
+
+ cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
+ cmd->code = code;
+ cmd->ident = ident;
+ cmd->len = cpu_to_le16(dlen);
+
+ if (dlen) {
+ count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
+ memcpy(skb_put(skb, count), data, count);
+ data += count;
+ }
+
+ len -= skb->len;
+
+ /* Continuation fragments (no L2CAP header) */
+ frag = &skb_shinfo(skb)->frag_list;
+ while (len) {
+ count = min_t(unsigned int, conn->mtu, len);
+
+ *frag = bt_skb_alloc(count, GFP_KERNEL);
+ if (!*frag)
+ goto fail;
+
+ memcpy(skb_put(*frag, count), data, count);
+
+ len -= count;
+ data += count;
+
+ frag = &(*frag)->next;
+ }
+
+ return skb;
+
+fail:
+ kfree_skb(skb);
+ return NULL;
+}
+
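+/* Configuration options are type/length/value triplets. Values of 1,
+ * 2 or 4 octets are returned by value; anything else is returned as a
+ * pointer into the original buffer.
+ */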
+static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
+ unsigned long *val)
+{
+ struct l2cap_conf_opt *opt = *ptr;
+ int len;
+
+ len = L2CAP_CONF_OPT_SIZE + opt->len;
+ *ptr += len;
+
+ *type = opt->type;
+ *olen = opt->len;
+
+ switch (opt->len) {
+ case 1:
+ *val = *((u8 *) opt->val);
+ break;
+
+ case 2:
+ *val = get_unaligned_le16(opt->val);
+ break;
+
+ case 4:
+ *val = get_unaligned_le32(opt->val);
+ break;
+
+ default:
+ *val = (unsigned long) opt->val;
+ break;
+ }
+
+ BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
+ return len;
+}
+
+static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
+{
+ struct l2cap_conf_opt *opt = *ptr;
+
+ BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
+
+ opt->type = type;
+ opt->len = len;
+
+ switch (len) {
+ case 1:
+ *((u8 *) opt->val) = val;
+ break;
+
+ case 2:
+ put_unaligned_le16(val, opt->val);
+ break;
+
+ case 4:
+ put_unaligned_le32(val, opt->val);
+ break;
+
+ default:
+ memcpy(opt->val, (void *) val, len);
+ break;
+ }
+
+ *ptr += L2CAP_CONF_OPT_SIZE + len;
+}
+
+static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
+{
+ struct l2cap_conf_efs efs;
+
+ switch (chan->mode) {
+ case L2CAP_MODE_ERTM:
+ efs.id = chan->local_id;
+ efs.stype = chan->local_stype;
+ efs.msdu = cpu_to_le16(chan->local_msdu);
+ efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
+ efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
+ efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
+ break;
+
+ case L2CAP_MODE_STREAMING:
+ efs.id = 1;
+ efs.stype = L2CAP_SERV_BESTEFFORT;
+ efs.msdu = cpu_to_le16(chan->local_msdu);
+ efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
+ efs.acc_lat = 0;
+ efs.flush_to = 0;
+ break;
+
+ default:
+ return;
+ }
+
+ l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
+ (unsigned long) &efs);
+}
+
+static void l2cap_ack_timeout(struct work_struct *work)
+{
+ struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
+ ack_timer.work);
+ u16 frames_to_ack;
+
+ BT_DBG("chan %p", chan);
+
+ l2cap_chan_lock(chan);
+
+ frames_to_ack = __seq_offset(chan, chan->buffer_seq,
+ chan->last_acked_seq);
+
+ if (frames_to_ack)
+ l2cap_send_rr_or_rnr(chan, 0);
+
+ l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
+}
+
+int l2cap_ertm_init(struct l2cap_chan *chan)
+{
+ int err;
+
+ chan->next_tx_seq = 0;
+ chan->expected_tx_seq = 0;
+ chan->expected_ack_seq = 0;
+ chan->unacked_frames = 0;
+ chan->buffer_seq = 0;
+ chan->frames_sent = 0;
+ chan->last_acked_seq = 0;
+ chan->sdu = NULL;
+ chan->sdu_last_frag = NULL;
+ chan->sdu_len = 0;
+
+ skb_queue_head_init(&chan->tx_q);
+
+ chan->local_amp_id = AMP_ID_BREDR;
+ chan->move_id = AMP_ID_BREDR;
+ chan->move_state = L2CAP_MOVE_STABLE;
+ chan->move_role = L2CAP_MOVE_ROLE_NONE;
+
+ if (chan->mode != L2CAP_MODE_ERTM)
+ return 0;
+
+ chan->rx_state = L2CAP_RX_STATE_RECV;
+ chan->tx_state = L2CAP_TX_STATE_XMIT;
+
+ INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
+ INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
+ INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
+
+ skb_queue_head_init(&chan->srej_q);
+
+ err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
+ if (err < 0)
+ return err;
+
+ err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
+ if (err < 0)
+ l2cap_seq_list_free(&chan->srej_list);
+
+ return err;
+}
+
+static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
+{
+ switch (mode) {
+ case L2CAP_MODE_STREAMING:
+ case L2CAP_MODE_ERTM:
+ if (l2cap_mode_supported(mode, remote_feat_mask))
+ return mode;
+ /* fall through */
+ default:
+ return L2CAP_MODE_BASIC;
+ }
+}
+
+static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
+{
+ return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
+}
+
+static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
+{
+ return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
+}
+
+static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
+ struct l2cap_conf_rfc *rfc)
+{
+ if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
+ u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
+
+		/* Class 1 devices must have ERTM timeouts
+ * exceeding the Link Supervision Timeout. The
+ * default Link Supervision Timeout for AMP
+ * controllers is 10 seconds.
+ *
+ * Class 1 devices use 0xffffffff for their
+ * best-effort flush timeout, so the clamping logic
+ * will result in a timeout that meets the above
+ * requirement. ERTM timeouts are 16-bit values, so
+ * the maximum timeout is 65.535 seconds.
+ */
+
+ /* Convert timeout to milliseconds and round */
+ ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
+
+ /* This is the recommended formula for class 2 devices
+ * that start ERTM timers when packets are sent to the
+ * controller.
+ */
+ ertm_to = 3 * ertm_to + 500;
+
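+		/* For example, the 0xffffffff best-effort flush timeout
+		 * used by class 1 devices rounds up to 4294968, and
+		 * 3 * 4294968 + 500 exceeds 0xffff, so the clamp below
+		 * yields the 65.535 second maximum.
+		 */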
+ if (ertm_to > 0xffff)
+ ertm_to = 0xffff;
+
+ rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
+ rfc->monitor_timeout = rfc->retrans_timeout;
+ } else {
+ rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
+ rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
+ }
+}
+
+static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
+{
+ if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
+ __l2cap_ews_supported(chan->conn)) {
+ /* use extended control field */
+ set_bit(FLAG_EXT_CTRL, &chan->flags);
+ chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
+ } else {
+ chan->tx_win = min_t(u16, chan->tx_win,
+ L2CAP_DEFAULT_TX_WINDOW);
+ chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
+ }
+ chan->ack_win = chan->tx_win;
+}
+
+static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
+{
+ struct l2cap_conf_req *req = data;
+ struct l2cap_conf_rfc rfc = { .mode = chan->mode };
+ void *ptr = req->data;
+ u16 size;
+
+ BT_DBG("chan %p", chan);
+
+ if (chan->num_conf_req || chan->num_conf_rsp)
+ goto done;
+
+ switch (chan->mode) {
+ case L2CAP_MODE_STREAMING:
+ case L2CAP_MODE_ERTM:
+ if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
+ break;
+
+ if (__l2cap_efs_supported(chan->conn))
+ set_bit(FLAG_EFS_ENABLE, &chan->flags);
+
+ /* fall through */
+ default:
+ chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
+ break;
+ }
+
+done:
+ if (chan->imtu != L2CAP_DEFAULT_MTU)
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
+
+ switch (chan->mode) {
+ case L2CAP_MODE_BASIC:
+ if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
+ !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
+ break;
+
+ rfc.mode = L2CAP_MODE_BASIC;
+ rfc.txwin_size = 0;
+ rfc.max_transmit = 0;
+ rfc.retrans_timeout = 0;
+ rfc.monitor_timeout = 0;
+ rfc.max_pdu_size = 0;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+ (unsigned long) &rfc);
+ break;
+
+ case L2CAP_MODE_ERTM:
+ rfc.mode = L2CAP_MODE_ERTM;
+ rfc.max_transmit = chan->max_tx;
+
+ __l2cap_set_ertm_timeouts(chan, &rfc);
+
+ size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
+ L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
+ L2CAP_FCS_SIZE);
+ rfc.max_pdu_size = cpu_to_le16(size);
+
+ l2cap_txwin_setup(chan);
+
+ rfc.txwin_size = min_t(u16, chan->tx_win,
+ L2CAP_DEFAULT_TX_WINDOW);
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+ (unsigned long) &rfc);
+
+ if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
+ l2cap_add_opt_efs(&ptr, chan);
+
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
+ chan->tx_win);
+
+ if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
+ if (chan->fcs == L2CAP_FCS_NONE ||
+ test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
+ chan->fcs = L2CAP_FCS_NONE;
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
+ chan->fcs);
+ }
+ break;
+
+ case L2CAP_MODE_STREAMING:
+ l2cap_txwin_setup(chan);
+ rfc.mode = L2CAP_MODE_STREAMING;
+ rfc.txwin_size = 0;
+ rfc.max_transmit = 0;
+ rfc.retrans_timeout = 0;
+ rfc.monitor_timeout = 0;
+
+ size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
+ L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
+ L2CAP_FCS_SIZE);
+ rfc.max_pdu_size = cpu_to_le16(size);
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+ (unsigned long) &rfc);
+
+ if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
+ l2cap_add_opt_efs(&ptr, chan);
+
+ if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
+ if (chan->fcs == L2CAP_FCS_NONE ||
+ test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
+ chan->fcs = L2CAP_FCS_NONE;
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
+ chan->fcs);
+ }
+ break;
+ }
+
+ req->dcid = cpu_to_le16(chan->dcid);
+ req->flags = cpu_to_le16(0);
+
+ return ptr - data;
+}
+
+static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+{
+ struct l2cap_conf_rsp *rsp = data;
+ void *ptr = rsp->data;
+ void *req = chan->conf_req;
+ int len = chan->conf_len;
+ int type, hint, olen;
+ unsigned long val;
+ struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
+ struct l2cap_conf_efs efs;
+ u8 remote_efs = 0;
+ u16 mtu = L2CAP_DEFAULT_MTU;
+ u16 result = L2CAP_CONF_SUCCESS;
+ u16 size;
+
+ BT_DBG("chan %p", chan);
+
+ while (len >= L2CAP_CONF_OPT_SIZE) {
+ len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
+
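+		/* The most significant bit of the option type marks a
+		 * hint: hints we don't understand may be skipped, while
+		 * unknown non-hint options are reported back as
+		 * L2CAP_CONF_UNKNOWN (see the default case below).
+		 */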
+ hint = type & L2CAP_CONF_HINT;
+ type &= L2CAP_CONF_MASK;
+
+ switch (type) {
+ case L2CAP_CONF_MTU:
+ mtu = val;
+ break;
+
+ case L2CAP_CONF_FLUSH_TO:
+ chan->flush_to = val;
+ break;
+
+ case L2CAP_CONF_QOS:
+ break;
+
+ case L2CAP_CONF_RFC:
+ if (olen == sizeof(rfc))
+ memcpy(&rfc, (void *) val, olen);
+ break;
+
+ case L2CAP_CONF_FCS:
+ if (val == L2CAP_FCS_NONE)
+ set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
+ break;
+
+ case L2CAP_CONF_EFS:
+ remote_efs = 1;
+ if (olen == sizeof(efs))
+ memcpy(&efs, (void *) val, olen);
+ break;
+
+ case L2CAP_CONF_EWS:
+ if (!chan->conn->hs_enabled)
+ return -ECONNREFUSED;
+
+ set_bit(FLAG_EXT_CTRL, &chan->flags);
+ set_bit(CONF_EWS_RECV, &chan->conf_state);
+ chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
+ chan->remote_tx_win = val;
+ break;
+
+ default:
+ if (hint)
+ break;
+
+ result = L2CAP_CONF_UNKNOWN;
+ *((u8 *) ptr++) = type;
+ break;
+ }
+ }
+
+ if (chan->num_conf_rsp || chan->num_conf_req > 1)
+ goto done;
+
+ switch (chan->mode) {
+ case L2CAP_MODE_STREAMING:
+ case L2CAP_MODE_ERTM:
+ if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
+ chan->mode = l2cap_select_mode(rfc.mode,
+ chan->conn->feat_mask);
+ break;
+ }
+
+ if (remote_efs) {
+ if (__l2cap_efs_supported(chan->conn))
+ set_bit(FLAG_EFS_ENABLE, &chan->flags);
+ else
+ return -ECONNREFUSED;
+ }
+
+ if (chan->mode != rfc.mode)
+ return -ECONNREFUSED;
+
+ break;
+ }
+
+done:
+ if (chan->mode != rfc.mode) {
+ result = L2CAP_CONF_UNACCEPT;
+ rfc.mode = chan->mode;
+
+ if (chan->num_conf_rsp == 1)
+ return -ECONNREFUSED;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+ (unsigned long) &rfc);
+ }
+
+ if (result == L2CAP_CONF_SUCCESS) {
+		/* Configure output options and let the other side know
+		 * which ones we don't like.
+		 */
+
+ if (mtu < L2CAP_DEFAULT_MIN_MTU)
+ result = L2CAP_CONF_UNACCEPT;
+ else {
+ chan->omtu = mtu;
+ set_bit(CONF_MTU_DONE, &chan->conf_state);
+ }
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
+
+ if (remote_efs) {
+ if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+ efs.stype != L2CAP_SERV_NOTRAFIC &&
+ efs.stype != chan->local_stype) {
+
+ result = L2CAP_CONF_UNACCEPT;
+
+ if (chan->num_conf_req >= 1)
+ return -ECONNREFUSED;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+ sizeof(efs),
+ (unsigned long) &efs);
+ } else {
+ /* Send PENDING Conf Rsp */
+ result = L2CAP_CONF_PENDING;
+ set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
+ }
+ }
+
+ switch (rfc.mode) {
+ case L2CAP_MODE_BASIC:
+ chan->fcs = L2CAP_FCS_NONE;
+ set_bit(CONF_MODE_DONE, &chan->conf_state);
+ break;
+
+ case L2CAP_MODE_ERTM:
+ if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
+ chan->remote_tx_win = rfc.txwin_size;
+ else
+ rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
+
+ chan->remote_max_tx = rfc.max_transmit;
+
+ size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
+ chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
+ L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
+ rfc.max_pdu_size = cpu_to_le16(size);
+ chan->remote_mps = size;
+
+ __l2cap_set_ertm_timeouts(chan, &rfc);
+
+ set_bit(CONF_MODE_DONE, &chan->conf_state);
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+ sizeof(rfc), (unsigned long) &rfc);
+
+ if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
+ chan->remote_id = efs.id;
+ chan->remote_stype = efs.stype;
+ chan->remote_msdu = le16_to_cpu(efs.msdu);
+ chan->remote_flush_to =
+ le32_to_cpu(efs.flush_to);
+ chan->remote_acc_lat =
+ le32_to_cpu(efs.acc_lat);
+ chan->remote_sdu_itime =
+ le32_to_cpu(efs.sdu_itime);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+ sizeof(efs),
+ (unsigned long) &efs);
+ }
+ break;
+
+ case L2CAP_MODE_STREAMING:
+ size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
+ chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
+ L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
+ rfc.max_pdu_size = cpu_to_le16(size);
+ chan->remote_mps = size;
+
+ set_bit(CONF_MODE_DONE, &chan->conf_state);
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+ (unsigned long) &rfc);
+
+ break;
+
+ default:
+ result = L2CAP_CONF_UNACCEPT;
+
+ memset(&rfc, 0, sizeof(rfc));
+ rfc.mode = chan->mode;
+ }
+
+ if (result == L2CAP_CONF_SUCCESS)
+ set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
+ }
+ rsp->scid = cpu_to_le16(chan->dcid);
+ rsp->result = cpu_to_le16(result);
+ rsp->flags = cpu_to_le16(0);
+
+ return ptr - data;
+}
+
+static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+ void *data, u16 *result)
+{
+ struct l2cap_conf_req *req = data;
+ void *ptr = req->data;
+ int type, olen;
+ unsigned long val;
+ struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
+ struct l2cap_conf_efs efs;
+
+ BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
+
+ while (len >= L2CAP_CONF_OPT_SIZE) {
+ len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
+
+ switch (type) {
+ case L2CAP_CONF_MTU:
+ if (val < L2CAP_DEFAULT_MIN_MTU) {
+ *result = L2CAP_CONF_UNACCEPT;
+ chan->imtu = L2CAP_DEFAULT_MIN_MTU;
+			} else {
+				chan->imtu = val;
+			}
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
+ break;
+
+ case L2CAP_CONF_FLUSH_TO:
+ chan->flush_to = val;
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
+ 2, chan->flush_to);
+ break;
+
+ case L2CAP_CONF_RFC:
+ if (olen == sizeof(rfc))
+ memcpy(&rfc, (void *)val, olen);
+
+ if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
+ rfc.mode != chan->mode)
+ return -ECONNREFUSED;
+
+ chan->fcs = 0;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+ sizeof(rfc), (unsigned long) &rfc);
+ break;
+
+ case L2CAP_CONF_EWS:
+ chan->ack_win = min_t(u16, val, chan->ack_win);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
+ chan->tx_win);
+ break;
+
+ case L2CAP_CONF_EFS:
+ if (olen == sizeof(efs))
+ memcpy(&efs, (void *)val, olen);
+
+ if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+ efs.stype != L2CAP_SERV_NOTRAFIC &&
+ efs.stype != chan->local_stype)
+ return -ECONNREFUSED;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
+ (unsigned long) &efs);
+ break;
+
+ case L2CAP_CONF_FCS:
+ if (*result == L2CAP_CONF_PENDING)
+ if (val == L2CAP_FCS_NONE)
+ set_bit(CONF_RECV_NO_FCS,
+ &chan->conf_state);
+ break;
+ }
+ }
+
+ if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
+ return -ECONNREFUSED;
+
+ chan->mode = rfc.mode;
+
+ if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
+ switch (rfc.mode) {
+ case L2CAP_MODE_ERTM:
+ chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
+ chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
+ chan->mps = le16_to_cpu(rfc.max_pdu_size);
+ if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
+ chan->ack_win = min_t(u16, chan->ack_win,
+ rfc.txwin_size);
+
+ if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
+ chan->local_msdu = le16_to_cpu(efs.msdu);
+ chan->local_sdu_itime =
+ le32_to_cpu(efs.sdu_itime);
+ chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
+ chan->local_flush_to =
+ le32_to_cpu(efs.flush_to);
+ }
+ break;
+
+ case L2CAP_MODE_STREAMING:
+ chan->mps = le16_to_cpu(rfc.max_pdu_size);
+ }
+ }
+
+ req->dcid = cpu_to_le16(chan->dcid);
+ req->flags = cpu_to_le16(0);
+
+ return ptr - data;
+}
+
+static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
+ u16 result, u16 flags)
+{
+ struct l2cap_conf_rsp *rsp = data;
+ void *ptr = rsp->data;
+
+ BT_DBG("chan %p", chan);
+
+ rsp->scid = cpu_to_le16(chan->dcid);
+ rsp->result = cpu_to_le16(result);
+ rsp->flags = cpu_to_le16(flags);
+
+ return ptr - data;
+}
+
+void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
+{
+ struct l2cap_le_conn_rsp rsp;
+ struct l2cap_conn *conn = chan->conn;
+
+ BT_DBG("chan %p", chan);
+
+ rsp.dcid = cpu_to_le16(chan->scid);
+ rsp.mtu = cpu_to_le16(chan->imtu);
+ rsp.mps = cpu_to_le16(chan->mps);
+ rsp.credits = cpu_to_le16(chan->rx_credits);
+ rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
+
+ l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
+ &rsp);
+}
+
+void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
+{
+ struct l2cap_conn_rsp rsp;
+ struct l2cap_conn *conn = chan->conn;
+ u8 buf[128];
+ u8 rsp_code;
+
+ rsp.scid = cpu_to_le16(chan->dcid);
+ rsp.dcid = cpu_to_le16(chan->scid);
+ rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
+ rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+
+ if (chan->hs_hcon)
+ rsp_code = L2CAP_CREATE_CHAN_RSP;
+ else
+ rsp_code = L2CAP_CONN_RSP;
+
+ BT_DBG("chan %p rsp_code %u", chan, rsp_code);
+
+ l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
+
+ if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
+ return;
+
+ l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+ l2cap_build_conf_req(chan, buf), buf);
+ chan->num_conf_req++;
+}
+
+static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
+{
+ int type, olen;
+ unsigned long val;
+ /* Use sane default values in case a misbehaving remote device
+ * did not send an RFC or extended window size option.
+ */
+ u16 txwin_ext = chan->ack_win;
+ struct l2cap_conf_rfc rfc = {
+ .mode = chan->mode,
+ .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
+ .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
+ .max_pdu_size = cpu_to_le16(chan->imtu),
+ .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
+ };
+
+ BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
+
+	if (chan->mode != L2CAP_MODE_ERTM &&
+	    chan->mode != L2CAP_MODE_STREAMING)
+ return;
+
+ while (len >= L2CAP_CONF_OPT_SIZE) {
+ len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
+
+ switch (type) {
+ case L2CAP_CONF_RFC:
+ if (olen == sizeof(rfc))
+ memcpy(&rfc, (void *)val, olen);
+ break;
+ case L2CAP_CONF_EWS:
+ txwin_ext = val;
+ break;
+ }
+ }
+
+ switch (rfc.mode) {
+ case L2CAP_MODE_ERTM:
+ chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
+ chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
+ chan->mps = le16_to_cpu(rfc.max_pdu_size);
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
+ else
+ chan->ack_win = min_t(u16, chan->ack_win,
+ rfc.txwin_size);
+ break;
+ case L2CAP_MODE_STREAMING:
+ chan->mps = le16_to_cpu(rfc.max_pdu_size);
+ }
+}
+
+static inline int l2cap_command_rej(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ u8 *data)
+{
+ struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
+
+ if (cmd_len < sizeof(*rej))
+ return -EPROTO;
+
+ if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
+ return 0;
+
+ if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
+ cmd->ident == conn->info_ident) {
+ cancel_delayed_work(&conn->info_timer);
+
+ conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
+ conn->info_ident = 0;
+
+ l2cap_conn_start(conn);
+ }
+
+ return 0;
+}
+
+static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd,
+ u8 *data, u8 rsp_code, u8 amp_id)
+{
+ struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
+ struct l2cap_conn_rsp rsp;
+ struct l2cap_chan *chan = NULL, *pchan;
+ int result, status = L2CAP_CS_NO_INFO;
+
+ u16 dcid = 0, scid = __le16_to_cpu(req->scid);
+ __le16 psm = req->psm;
+
+ BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
+
+	/* Check if we have a socket listening on this PSM */
+ pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
+ &conn->hcon->dst, ACL_LINK);
+ if (!pchan) {
+ result = L2CAP_CR_BAD_PSM;
+ goto sendresp;
+ }
+
+ mutex_lock(&conn->chan_lock);
+ l2cap_chan_lock(pchan);
+
+ /* Check if the ACL is secure enough (if not SDP) */
+ if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
+ !hci_conn_check_link_mode(conn->hcon)) {
+ conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
+ result = L2CAP_CR_SEC_BLOCK;
+ goto response;
+ }
+
+ result = L2CAP_CR_NO_MEM;
+
+	/* Check if we already have a channel with that dcid */
+ if (__l2cap_get_chan_by_dcid(conn, scid))
+ goto response;
+
+ chan = pchan->ops->new_connection(pchan);
+ if (!chan)
+ goto response;
+
+	/* For certain devices (ex: HID mouse), support for authentication,
+	 * pairing and bonding is optional. For such devices, in order to
+	 * avoid keeping the ACL alive for too long after L2CAP
+	 * disconnection, reset the ACL disc_timeout back to
+	 * HCI_DISCONN_TIMEOUT during L2CAP connect.
+	 */
+ conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
+
+ bacpy(&chan->src, &conn->hcon->src);
+ bacpy(&chan->dst, &conn->hcon->dst);
+ chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
+ chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
+ chan->psm = psm;
+ chan->dcid = scid;
+ chan->local_amp_id = amp_id;
+
+ __l2cap_chan_add(conn, chan);
+
+ dcid = chan->scid;
+
+ __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
+
+ chan->ident = cmd->ident;
+
+ if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
+ if (l2cap_chan_check_security(chan)) {
+ if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
+ l2cap_state_change(chan, BT_CONNECT2);
+ result = L2CAP_CR_PEND;
+ status = L2CAP_CS_AUTHOR_PEND;
+ chan->ops->defer(chan);
+ } else {
+ /* Force pending result for AMP controllers.
+ * The connection will succeed after the
+ * physical link is up.
+ */
+ if (amp_id == AMP_ID_BREDR) {
+ l2cap_state_change(chan, BT_CONFIG);
+ result = L2CAP_CR_SUCCESS;
+ } else {
+ l2cap_state_change(chan, BT_CONNECT2);
+ result = L2CAP_CR_PEND;
+ }
+ status = L2CAP_CS_NO_INFO;
+ }
+ } else {
+ l2cap_state_change(chan, BT_CONNECT2);
+ result = L2CAP_CR_PEND;
+ status = L2CAP_CS_AUTHEN_PEND;
+ }
+ } else {
+ l2cap_state_change(chan, BT_CONNECT2);
+ result = L2CAP_CR_PEND;
+ status = L2CAP_CS_NO_INFO;
+ }
+
+response:
+ l2cap_chan_unlock(pchan);
+ mutex_unlock(&conn->chan_lock);
+
+sendresp:
+ rsp.scid = cpu_to_le16(scid);
+ rsp.dcid = cpu_to_le16(dcid);
+ rsp.result = cpu_to_le16(result);
+ rsp.status = cpu_to_le16(status);
+ l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
+
+ if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
+ struct l2cap_info_req info;
+ info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
+
+ conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
+ conn->info_ident = l2cap_get_ident(conn);
+
+ schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
+
+ l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
+ sizeof(info), &info);
+ }
+
+ if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
+ result == L2CAP_CR_SUCCESS) {
+ u8 buf[128];
+ set_bit(CONF_REQ_SENT, &chan->conf_state);
+ l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+ l2cap_build_conf_req(chan, buf), buf);
+ chan->num_conf_req++;
+ }
+
+ return chan;
+}
+
+static int l2cap_connect_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
+{
+ struct hci_dev *hdev = conn->hcon->hdev;
+ struct hci_conn *hcon = conn->hcon;
+
+ if (cmd_len < sizeof(struct l2cap_conn_req))
+ return -EPROTO;
+
+ hci_dev_lock(hdev);
+ if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
+ !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
+ mgmt_device_connected(hdev, &hcon->dst, hcon->type,
+ hcon->dst_type, 0, NULL, 0,
+ hcon->dev_class);
+ hci_dev_unlock(hdev);
+
+ l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
+ return 0;
+}
+
+static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ u8 *data)
+{
+ struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
+ u16 scid, dcid, result, status;
+ struct l2cap_chan *chan;
+ u8 req[128];
+ int err;
+
+ if (cmd_len < sizeof(*rsp))
+ return -EPROTO;
+
+ scid = __le16_to_cpu(rsp->scid);
+ dcid = __le16_to_cpu(rsp->dcid);
+ result = __le16_to_cpu(rsp->result);
+ status = __le16_to_cpu(rsp->status);
+
+ BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
+ dcid, scid, result, status);
+
+ mutex_lock(&conn->chan_lock);
+
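+	/* A success response identifies the channel by our scid; pending
+	 * and error responses may carry a zero scid, so fall back to
+	 * matching on the command ident.
+	 */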
+ if (scid) {
+ chan = __l2cap_get_chan_by_scid(conn, scid);
+ if (!chan) {
+ err = -EBADSLT;
+ goto unlock;
+ }
+ } else {
+ chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
+ if (!chan) {
+ err = -EBADSLT;
+ goto unlock;
+ }
+ }
+
+ err = 0;
+
+ l2cap_chan_lock(chan);
+
+ switch (result) {
+ case L2CAP_CR_SUCCESS:
+ l2cap_state_change(chan, BT_CONFIG);
+ chan->ident = 0;
+ chan->dcid = dcid;
+ clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
+
+ if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
+ break;
+
+ l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+ l2cap_build_conf_req(chan, req), req);
+ chan->num_conf_req++;
+ break;
+
+ case L2CAP_CR_PEND:
+ set_bit(CONF_CONNECT_PEND, &chan->conf_state);
+ break;
+
+ default:
+ l2cap_chan_del(chan, ECONNREFUSED);
+ break;
+ }
+
+ l2cap_chan_unlock(chan);
+
+unlock:
+ mutex_unlock(&conn->chan_lock);
+
+ return err;
+}
+
+static inline void set_default_fcs(struct l2cap_chan *chan)
+{
+ /* FCS is enabled only in ERTM or streaming mode, if one or both
+ * sides request it.
+ */
+ if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
+ chan->fcs = L2CAP_FCS_NONE;
+ else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
+ chan->fcs = L2CAP_FCS_CRC16;
+}
+
+static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
+ u8 ident, u16 flags)
+{
+ struct l2cap_conn *conn = chan->conn;
+
+ BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
+ flags);
+
+ clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
+ set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
+
+ l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
+ l2cap_build_conf_rsp(chan, data,
+ L2CAP_CONF_SUCCESS, flags), data);
+}
+
+static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
+ u16 scid, u16 dcid)
+{
+ struct l2cap_cmd_rej_cid rej;
+
+ rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
+ rej.scid = __cpu_to_le16(scid);
+ rej.dcid = __cpu_to_le16(dcid);
+
+ l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
+}
+
+static inline int l2cap_config_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ u8 *data)
+{
+ struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
+ u16 dcid, flags;
+ u8 rsp[64];
+ struct l2cap_chan *chan;
+ int len, err = 0;
+
+ if (cmd_len < sizeof(*req))
+ return -EPROTO;
+
+ dcid = __le16_to_cpu(req->dcid);
+ flags = __le16_to_cpu(req->flags);
+
+ BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
+
+ chan = l2cap_get_chan_by_scid(conn, dcid);
+ if (!chan) {
+ cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
+ return 0;
+ }
+
+ if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
+ cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
+ chan->dcid);
+ goto unlock;
+ }
+
+	/* Reject if the accumulated config data would overflow the buffer. */
+ len = cmd_len - sizeof(*req);
+ if (chan->conf_len + len > sizeof(chan->conf_req)) {
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
+ l2cap_build_conf_rsp(chan, rsp,
+ L2CAP_CONF_REJECT, flags), rsp);
+ goto unlock;
+ }
+
+ /* Store config. */
+ memcpy(chan->conf_req + chan->conf_len, req->data, len);
+ chan->conf_len += len;
+
+ if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
+ /* Incomplete config. Send empty response. */
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
+ l2cap_build_conf_rsp(chan, rsp,
+ L2CAP_CONF_SUCCESS, flags), rsp);
+ goto unlock;
+ }
+
+ /* Complete config. */
+ len = l2cap_parse_conf_req(chan, rsp);
+ if (len < 0) {
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ goto unlock;
+ }
+
+ chan->ident = cmd->ident;
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
+ chan->num_conf_rsp++;
+
+ /* Reset config buffer. */
+ chan->conf_len = 0;
+
+ if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
+ goto unlock;
+
+ if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
+ set_default_fcs(chan);
+
+ if (chan->mode == L2CAP_MODE_ERTM ||
+ chan->mode == L2CAP_MODE_STREAMING)
+ err = l2cap_ertm_init(chan);
+
+ if (err < 0)
+ l2cap_send_disconn_req(chan, -err);
+ else
+ l2cap_chan_ready(chan);
+
+ goto unlock;
+ }
+
+ if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
+ u8 buf[64];
+ l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+ l2cap_build_conf_req(chan, buf), buf);
+ chan->num_conf_req++;
+ }
+
+	/* Got Conf Rsp PENDING from remote side and assume we sent
+	 * Conf Rsp PENDING in the code above.
+	 */
+ if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
+ test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
+
+ /* check compatibility */
+
+ /* Send rsp for BR/EDR channel */
+ if (!chan->hs_hcon)
+ l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
+ else
+ chan->ident = cmd->ident;
+ }
+
+unlock:
+ l2cap_chan_unlock(chan);
+ return err;
+}
+
+static inline int l2cap_config_rsp(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ u8 *data)
+{
+ struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
+ u16 scid, flags, result;
+ struct l2cap_chan *chan;
+ int len = cmd_len - sizeof(*rsp);
+ int err = 0;
+
+ if (cmd_len < sizeof(*rsp))
+ return -EPROTO;
+
+ scid = __le16_to_cpu(rsp->scid);
+ flags = __le16_to_cpu(rsp->flags);
+ result = __le16_to_cpu(rsp->result);
+
+ BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
+ result, len);
+
+ chan = l2cap_get_chan_by_scid(conn, scid);
+ if (!chan)
+ return 0;
+
+ switch (result) {
+ case L2CAP_CONF_SUCCESS:
+ l2cap_conf_rfc_get(chan, rsp->data, len);
+ clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
+ break;
+
+ case L2CAP_CONF_PENDING:
+ set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
+
+ if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
+ char buf[64];
+
+ len = l2cap_parse_conf_rsp(chan, rsp->data, len,
+ buf, &result);
+ if (len < 0) {
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ goto done;
+ }
+
+ if (!chan->hs_hcon) {
+ l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
+ 0);
+ } else {
+ if (l2cap_check_efs(chan)) {
+ amp_create_logical_link(chan);
+ chan->ident = cmd->ident;
+ }
+ }
+ }
+ goto done;
+
+ case L2CAP_CONF_UNACCEPT:
+ if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
+ char req[64];
+
+ if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ goto done;
+ }
+
+ /* throw out any old stored conf requests */
+ result = L2CAP_CONF_SUCCESS;
+ len = l2cap_parse_conf_rsp(chan, rsp->data, len,
+ req, &result);
+ if (len < 0) {
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ goto done;
+ }
+
+ l2cap_send_cmd(conn, l2cap_get_ident(conn),
+ L2CAP_CONF_REQ, len, req);
+ chan->num_conf_req++;
+ if (result != L2CAP_CONF_SUCCESS)
+ goto done;
+ break;
+ }
+
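+		/* fall through */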
+ default:
+ l2cap_chan_set_err(chan, ECONNRESET);
+
+ __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ goto done;
+ }
+
+ if (flags & L2CAP_CONF_FLAG_CONTINUATION)
+ goto done;
+
+ set_bit(CONF_INPUT_DONE, &chan->conf_state);
+
+ if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
+ set_default_fcs(chan);
+
+ if (chan->mode == L2CAP_MODE_ERTM ||
+ chan->mode == L2CAP_MODE_STREAMING)
+ err = l2cap_ertm_init(chan);
+
+ if (err < 0)
+ l2cap_send_disconn_req(chan, -err);
+ else
+ l2cap_chan_ready(chan);
+ }
+
+done:
+ l2cap_chan_unlock(chan);
+ return err;
+}
+
+static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ u8 *data)
+{
+ struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
+ struct l2cap_disconn_rsp rsp;
+ u16 dcid, scid;
+ struct l2cap_chan *chan;
+
+ if (cmd_len != sizeof(*req))
+ return -EPROTO;
+
+ scid = __le16_to_cpu(req->scid);
+ dcid = __le16_to_cpu(req->dcid);
+
+ BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
+
+ mutex_lock(&conn->chan_lock);
+
+ chan = __l2cap_get_chan_by_scid(conn, dcid);
+ if (!chan) {
+ mutex_unlock(&conn->chan_lock);
+ cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
+ return 0;
+ }
+
+ l2cap_chan_lock(chan);
+
+ rsp.dcid = cpu_to_le16(chan->scid);
+ rsp.scid = cpu_to_le16(chan->dcid);
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
+
+ chan->ops->set_shutdown(chan);
+
+ l2cap_chan_hold(chan);
+ l2cap_chan_del(chan, ECONNRESET);
+
+ l2cap_chan_unlock(chan);
+
+ chan->ops->close(chan);
+ l2cap_chan_put(chan);
+
+ mutex_unlock(&conn->chan_lock);
+
+ return 0;
+}
+
+static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ u8 *data)
+{
+ struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
+ u16 dcid, scid;
+ struct l2cap_chan *chan;
+
+ if (cmd_len != sizeof(*rsp))
+ return -EPROTO;
+
+ scid = __le16_to_cpu(rsp->scid);
+ dcid = __le16_to_cpu(rsp->dcid);
+
+ BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
+
+ mutex_lock(&conn->chan_lock);
+
+ chan = __l2cap_get_chan_by_scid(conn, scid);
+ if (!chan) {
+ mutex_unlock(&conn->chan_lock);
+ return 0;
+ }
+
+ l2cap_chan_lock(chan);
+
+ l2cap_chan_hold(chan);
+ l2cap_chan_del(chan, 0);
+
+ l2cap_chan_unlock(chan);
+
+ chan->ops->close(chan);
+ l2cap_chan_put(chan);
+
+ mutex_unlock(&conn->chan_lock);
+
+ return 0;
+}
+
+static inline int l2cap_information_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ u8 *data)
+{
+ struct l2cap_info_req *req = (struct l2cap_info_req *) data;
+ u16 type;
+
+ if (cmd_len != sizeof(*req))
+ return -EPROTO;
+
+ type = __le16_to_cpu(req->type);
+
+ BT_DBG("type 0x%4.4x", type);
+
+ if (type == L2CAP_IT_FEAT_MASK) {
+ u8 buf[8];
+ u32 feat_mask = l2cap_feat_mask;
+ struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
+ rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
+ rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
+ if (!disable_ertm)
+ feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
+ | L2CAP_FEAT_FCS;
+ if (conn->hs_enabled)
+ feat_mask |= L2CAP_FEAT_EXT_FLOW
+ | L2CAP_FEAT_EXT_WINDOW;
+
+ put_unaligned_le32(feat_mask, rsp->data);
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
+ buf);
+ } else if (type == L2CAP_IT_FIXED_CHAN) {
+ u8 buf[12];
+ struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
+
+ if (conn->hs_enabled)
+ l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
+ else
+ l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
+
+ rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
+ rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
+ memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
+ buf);
+ } else {
+ struct l2cap_info_rsp rsp;
+ rsp.type = cpu_to_le16(type);
+ rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
+ &rsp);
+ }
+
+ return 0;
+}
+
+static inline int l2cap_information_rsp(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ u8 *data)
+{
+ struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
+ u16 type, result;
+
+ if (cmd_len < sizeof(*rsp))
+ return -EPROTO;
+
+ type = __le16_to_cpu(rsp->type);
+ result = __le16_to_cpu(rsp->result);
+
+ BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
+
+	/* L2CAP Info req/rsp are not bound to a channel, so add extra checks */
+ if (cmd->ident != conn->info_ident ||
+ conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
+ return 0;
+
+ cancel_delayed_work(&conn->info_timer);
+
+ if (result != L2CAP_IR_SUCCESS) {
+ conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
+ conn->info_ident = 0;
+
+ l2cap_conn_start(conn);
+
+ return 0;
+ }
+
+ switch (type) {
+ case L2CAP_IT_FEAT_MASK:
+ conn->feat_mask = get_unaligned_le32(rsp->data);
+
+ if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
+ struct l2cap_info_req req;
+ req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
+
+ conn->info_ident = l2cap_get_ident(conn);
+
+ l2cap_send_cmd(conn, conn->info_ident,
+ L2CAP_INFO_REQ, sizeof(req), &req);
+ } else {
+ conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
+ conn->info_ident = 0;
+
+ l2cap_conn_start(conn);
+ }
+ break;
+
+ case L2CAP_IT_FIXED_CHAN:
+ conn->fixed_chan_mask = rsp->data[0];
+ conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
+ conn->info_ident = 0;
+
+ l2cap_conn_start(conn);
+ break;
+ }
+
+ return 0;
+}
+
+static int l2cap_create_channel_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd,
+ u16 cmd_len, void *data)
+{
+ struct l2cap_create_chan_req *req = data;
+ struct l2cap_create_chan_rsp rsp;
+ struct l2cap_chan *chan;
+ struct hci_dev *hdev;
+ u16 psm, scid;
+
+ if (cmd_len != sizeof(*req))
+ return -EPROTO;
+
+ if (!conn->hs_enabled)
+ return -EINVAL;
+
+ psm = le16_to_cpu(req->psm);
+ scid = le16_to_cpu(req->scid);
+
+ BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
+
+	/* For controller id 0, make a BR/EDR connection */
+ if (req->amp_id == AMP_ID_BREDR) {
+ l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
+ req->amp_id);
+ return 0;
+ }
+
+ /* Validate AMP controller id */
+ hdev = hci_dev_get(req->amp_id);
+ if (!hdev)
+ goto error;
+
+ if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
+ hci_dev_put(hdev);
+ goto error;
+ }
+
+ chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
+ req->amp_id);
+ if (chan) {
+ struct amp_mgr *mgr = conn->hcon->amp_mgr;
+ struct hci_conn *hs_hcon;
+
+ hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
+ &conn->hcon->dst);
+ if (!hs_hcon) {
+ hci_dev_put(hdev);
+ cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
+ chan->dcid);
+ return 0;
+ }
+
+ BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
+
+ mgr->bredr_chan = chan;
+ chan->hs_hcon = hs_hcon;
+ chan->fcs = L2CAP_FCS_NONE;
+ conn->mtu = hdev->block_mtu;
+ }
+
+ hci_dev_put(hdev);
+
+ return 0;
+
+error:
+ rsp.dcid = 0;
+ rsp.scid = cpu_to_le16(scid);
+ rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
+ rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
+ sizeof(rsp), &rsp);
+
+ return 0;
+}
+
+static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
+{
+ struct l2cap_move_chan_req req;
+ u8 ident;
+
+ BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
+
+ ident = l2cap_get_ident(chan->conn);
+ chan->ident = ident;
+
+ req.icid = cpu_to_le16(chan->scid);
+ req.dest_amp_id = dest_amp_id;
+
+ l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
+ &req);
+
+ __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
+}
+
+static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
+{
+ struct l2cap_move_chan_rsp rsp;
+
+ BT_DBG("chan %p, result 0x%4.4x", chan, result);
+
+ rsp.icid = cpu_to_le16(chan->dcid);
+ rsp.result = cpu_to_le16(result);
+
+ l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
+ sizeof(rsp), &rsp);
+}
+
+static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
+{
+ struct l2cap_move_chan_cfm cfm;
+
+ BT_DBG("chan %p, result 0x%4.4x", chan, result);
+
+ chan->ident = l2cap_get_ident(chan->conn);
+
+ cfm.icid = cpu_to_le16(chan->scid);
+ cfm.result = cpu_to_le16(result);
+
+ l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
+ sizeof(cfm), &cfm);
+
+ __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
+}
+
+static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
+{
+ struct l2cap_move_chan_cfm cfm;
+
+ BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
+
+ cfm.icid = cpu_to_le16(icid);
+ cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
+
+ l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
+ sizeof(cfm), &cfm);
+}
+
+static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
+ u16 icid)
+{
+ struct l2cap_move_chan_cfm_rsp rsp;
+
+ BT_DBG("icid 0x%4.4x", icid);
+
+ rsp.icid = cpu_to_le16(icid);
+ l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
+}
+
+static void __release_logical_link(struct l2cap_chan *chan)
+{
+ chan->hs_hchan = NULL;
+ chan->hs_hcon = NULL;
+
+ /* Placeholder - release the logical link */
+}
+
+static void l2cap_logical_fail(struct l2cap_chan *chan)
+{
+ /* Logical link setup failed */
+ if (chan->state != BT_CONNECTED) {
+ /* Create channel failure, disconnect */
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ return;
+ }
+
+ switch (chan->move_role) {
+ case L2CAP_MOVE_ROLE_RESPONDER:
+ l2cap_move_done(chan);
+ l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
+ break;
+ case L2CAP_MOVE_ROLE_INITIATOR:
+ if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
+ chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
+ /* Remote has only sent pending or
+ * success responses, clean up
+ */
+ l2cap_move_done(chan);
+ }
+
+		/* Other AMP move states imply that the move
+		 * has already been aborted.
+		 */
+ l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
+ break;
+ }
+}
+
+static void l2cap_logical_finish_create(struct l2cap_chan *chan,
+ struct hci_chan *hchan)
+{
+ struct l2cap_conf_rsp rsp;
+
+ chan->hs_hchan = hchan;
+ chan->hs_hcon->l2cap_data = chan->conn;
+
+ l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
+
+ if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
+ int err;
+
+ set_default_fcs(chan);
+
+ err = l2cap_ertm_init(chan);
+ if (err < 0)
+ l2cap_send_disconn_req(chan, -err);
+ else
+ l2cap_chan_ready(chan);
+ }
+}
+
+static void l2cap_logical_finish_move(struct l2cap_chan *chan,
+ struct hci_chan *hchan)
+{
+ chan->hs_hcon = hchan->conn;
+ chan->hs_hcon->l2cap_data = chan->conn;
+
+ BT_DBG("move_state %d", chan->move_state);
+
+ switch (chan->move_state) {
+ case L2CAP_MOVE_WAIT_LOGICAL_COMP:
+ /* Move confirm will be sent after a success
+ * response is received
+ */
+ chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
+ break;
+ case L2CAP_MOVE_WAIT_LOGICAL_CFM:
+ if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
+ chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
+ } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
+ chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
+ l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
+ } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
+ chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
+ l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
+ }
+ break;
+ default:
+		/* Move was not in the expected state; release the logical link */
+ __release_logical_link(chan);
+
+ chan->move_state = L2CAP_MOVE_STABLE;
+ }
+}
+
+/* Call with chan locked */
+void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
+ u8 status)
+{
+ BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
+
+ if (status) {
+ l2cap_logical_fail(chan);
+ __release_logical_link(chan);
+ return;
+ }
+
+ if (chan->state != BT_CONNECTED) {
+ /* Ignore logical link if channel is on BR/EDR */
+ if (chan->local_amp_id != AMP_ID_BREDR)
+ l2cap_logical_finish_create(chan, hchan);
+ } else {
+ l2cap_logical_finish_move(chan, hchan);
+ }
+}
+
+void l2cap_move_start(struct l2cap_chan *chan)
+{
+ BT_DBG("chan %p", chan);
+
+ if (chan->local_amp_id == AMP_ID_BREDR) {
+ if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
+ return;
+ chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
+ chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
+ /* Placeholder - start physical link setup */
+ } else {
+ chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
+ chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
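+		/* Moving back to BR/EDR (destination controller id 0,
+		 * i.e. AMP_ID_BREDR)
+		 */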
+ chan->move_id = 0;
+ l2cap_move_setup(chan);
+ l2cap_send_move_chan_req(chan, 0);
+ }
+}
+
+static void l2cap_do_create(struct l2cap_chan *chan, int result,
+ u8 local_amp_id, u8 remote_amp_id)
+{
+ BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
+ local_amp_id, remote_amp_id);
+
+ chan->fcs = L2CAP_FCS_NONE;
+
+ /* Outgoing channel on AMP */
+ if (chan->state == BT_CONNECT) {
+ if (result == L2CAP_CR_SUCCESS) {
+ chan->local_amp_id = local_amp_id;
+ l2cap_send_create_chan_req(chan, remote_amp_id);
+ } else {
+ /* Revert to BR/EDR connect */
+ l2cap_send_conn_req(chan);
+ }
+
+ return;
+ }
+
+ /* Incoming channel on AMP */
+ if (__l2cap_no_conn_pending(chan)) {
+ struct l2cap_conn_rsp rsp;
+ char buf[128];
+ rsp.scid = cpu_to_le16(chan->dcid);
+ rsp.dcid = cpu_to_le16(chan->scid);
+
+ if (result == L2CAP_CR_SUCCESS) {
+ /* Send successful response */
+ rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
+ rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+ } else {
+ /* Send negative response */
+ rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
+ rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+ }
+
+ l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
+ sizeof(rsp), &rsp);
+
+ if (result == L2CAP_CR_SUCCESS) {
+ l2cap_state_change(chan, BT_CONFIG);
+ set_bit(CONF_REQ_SENT, &chan->conf_state);
+ l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
+ L2CAP_CONF_REQ,
+ l2cap_build_conf_req(chan, buf), buf);
+ chan->num_conf_req++;
+ }
+ }
+}
+
+static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
+ u8 remote_amp_id)
+{
+ l2cap_move_setup(chan);
+ chan->move_id = local_amp_id;
+ chan->move_state = L2CAP_MOVE_WAIT_RSP;
+
+ l2cap_send_move_chan_req(chan, remote_amp_id);
+}
+
+static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
+{
+ struct hci_chan *hchan = NULL;
+
+ /* Placeholder - get hci_chan for logical link */
+
+ if (hchan) {
+ if (hchan->state == BT_CONNECTED) {
+ /* Logical link is ready to go */
+ chan->hs_hcon = hchan->conn;
+ chan->hs_hcon->l2cap_data = chan->conn;
+ chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
+ l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
+
+ l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
+ } else {
+ /* Wait for logical link to be ready */
+ chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
+ }
+ } else {
+ /* Logical link not available */
+ l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
+ }
+}
+
+static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
+{
+ if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
+ u8 rsp_result;
+ if (result == -EINVAL)
+ rsp_result = L2CAP_MR_BAD_ID;
+ else
+ rsp_result = L2CAP_MR_NOT_ALLOWED;
+
+ l2cap_send_move_chan_rsp(chan, rsp_result);
+ }
+
+ chan->move_role = L2CAP_MOVE_ROLE_NONE;
+ chan->move_state = L2CAP_MOVE_STABLE;
+
+ /* Restart data transmission */
+ l2cap_ertm_send(chan);
+}
+
+/* Invoke with locked chan */
+void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
+{
+ u8 local_amp_id = chan->local_amp_id;
+ u8 remote_amp_id = chan->remote_amp_id;
+
+ BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
+ chan, result, local_amp_id, remote_amp_id);
+
+ if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
+ l2cap_chan_unlock(chan);
+ return;
+ }
+
+ if (chan->state != BT_CONNECTED) {
+ l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
+ } else if (result != L2CAP_MR_SUCCESS) {
+ l2cap_do_move_cancel(chan, result);
+ } else {
+ switch (chan->move_role) {
+ case L2CAP_MOVE_ROLE_INITIATOR:
+ l2cap_do_move_initiate(chan, local_amp_id,
+ remote_amp_id);
+ break;
+ case L2CAP_MOVE_ROLE_RESPONDER:
+ l2cap_do_move_respond(chan, result);
+ break;
+ default:
+ l2cap_do_move_cancel(chan, result);
+ break;
+ }
+ }
+}
+
+static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd,
+ u16 cmd_len, void *data)
+{
+ struct l2cap_move_chan_req *req = data;
+ struct l2cap_move_chan_rsp rsp;
+ struct l2cap_chan *chan;
+ u16 icid = 0;
+ u16 result = L2CAP_MR_NOT_ALLOWED;
+
+ if (cmd_len != sizeof(*req))
+ return -EPROTO;
+
+ icid = le16_to_cpu(req->icid);
+
+ BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
+
+ if (!conn->hs_enabled)
+ return -EINVAL;
+
+ chan = l2cap_get_chan_by_dcid(conn, icid);
+ if (!chan) {
+ rsp.icid = cpu_to_le16(icid);
+ rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
+ sizeof(rsp), &rsp);
+ return 0;
+ }
+
+ chan->ident = cmd->ident;
+
+ if (chan->scid < L2CAP_CID_DYN_START ||
+ chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
+ (chan->mode != L2CAP_MODE_ERTM &&
+ chan->mode != L2CAP_MODE_STREAMING)) {
+ result = L2CAP_MR_NOT_ALLOWED;
+ goto send_move_response;
+ }
+
+ if (chan->local_amp_id == req->dest_amp_id) {
+ result = L2CAP_MR_SAME_ID;
+ goto send_move_response;
+ }
+
+ if (req->dest_amp_id != AMP_ID_BREDR) {
+ struct hci_dev *hdev;
+ hdev = hci_dev_get(req->dest_amp_id);
+ if (!hdev || hdev->dev_type != HCI_AMP ||
+ !test_bit(HCI_UP, &hdev->flags)) {
+ if (hdev)
+ hci_dev_put(hdev);
+
+ result = L2CAP_MR_BAD_ID;
+ goto send_move_response;
+ }
+ hci_dev_put(hdev);
+ }
+
+ /* Detect a move collision. Only send a collision response
+ * if this side has "lost", otherwise proceed with the move.
+ * The winner has the larger bd_addr.
+ */
+ if ((__chan_is_moving(chan) ||
+ chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
+ bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
+ result = L2CAP_MR_COLLISION;
+ goto send_move_response;
+ }
+
+ chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
+ l2cap_move_setup(chan);
+ chan->move_id = req->dest_amp_id;
+ icid = chan->dcid;
+
+ if (req->dest_amp_id == AMP_ID_BREDR) {
+ /* Moving to BR/EDR */
+ if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
+ chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
+ result = L2CAP_MR_PEND;
+ } else {
+ chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
+ result = L2CAP_MR_SUCCESS;
+ }
+ } else {
+ chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
+ /* Placeholder - uncomment when amp functions are available */
+ /*amp_accept_physical(chan, req->dest_amp_id);*/
+ result = L2CAP_MR_PEND;
+ }
+
+send_move_response:
+ l2cap_send_move_chan_rsp(chan, result);
+
+ l2cap_chan_unlock(chan);
+
+ return 0;
+}
+
+static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
+{
+ struct l2cap_chan *chan;
+ struct hci_chan *hchan = NULL;
+
+ chan = l2cap_get_chan_by_scid(conn, icid);
+ if (!chan) {
+ l2cap_send_move_chan_cfm_icid(conn, icid);
+ return;
+ }
+
+ __clear_chan_timer(chan);
+ if (result == L2CAP_MR_PEND)
+ __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
+
+ switch (chan->move_state) {
+ case L2CAP_MOVE_WAIT_LOGICAL_COMP:
+ /* Move confirm will be sent when logical link
+ * is complete.
+ */
+ chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
+ break;
+ case L2CAP_MOVE_WAIT_RSP_SUCCESS:
+ if (result == L2CAP_MR_PEND) {
+ break;
+ } else if (test_bit(CONN_LOCAL_BUSY,
+ &chan->conn_state)) {
+ chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
+ } else {
+ /* Logical link is up or moving to BR/EDR,
+ * proceed with move
+ */
+ chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
+ l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
+ }
+ break;
+ case L2CAP_MOVE_WAIT_RSP:
+ /* Moving to AMP */
+ if (result == L2CAP_MR_SUCCESS) {
+ /* Remote is ready, send confirm immediately
+ * after logical link is ready
+ */
+ chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
+ } else {
+ /* Both logical link and move success
+ * are required to confirm
+ */
+ chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
+ }
+
+ /* Placeholder - get hci_chan for logical link */
+ if (!hchan) {
+ /* Logical link not available */
+ l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
+ break;
+ }
+
+ /* If the logical link is not yet connected, do not
+ * send confirmation.
+ */
+ if (hchan->state != BT_CONNECTED)
+ break;
+
+ /* Logical link is already ready to go */
+
+ chan->hs_hcon = hchan->conn;
+ chan->hs_hcon->l2cap_data = chan->conn;
+
+ if (result == L2CAP_MR_SUCCESS) {
+ /* Can confirm now */
+ l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
+ } else {
+ /* Now only need move success
+ * to confirm
+ */
+ chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
+ }
+
+ l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
+ break;
+ default:
+ /* Any other AMP move state means the move failed. */
+ chan->move_id = chan->local_amp_id;
+ l2cap_move_done(chan);
+ l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
+ }
+
+ l2cap_chan_unlock(chan);
+}
+
+static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
+ u16 result)
+{
+ struct l2cap_chan *chan;
+
+ chan = l2cap_get_chan_by_ident(conn, ident);
+ if (!chan) {
+ /* Could not locate channel, icid is best guess */
+ l2cap_send_move_chan_cfm_icid(conn, icid);
+ return;
+ }
+
+ __clear_chan_timer(chan);
+
+ if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
+ if (result == L2CAP_MR_COLLISION) {
+ chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
+ } else {
+ /* Cleanup - cancel move */
+ chan->move_id = chan->local_amp_id;
+ l2cap_move_done(chan);
+ }
+ }
+
+ l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
+
+ l2cap_chan_unlock(chan);
+}
+
+static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd,
+ u16 cmd_len, void *data)
+{
+ struct l2cap_move_chan_rsp *rsp = data;
+ u16 icid, result;
+
+ if (cmd_len != sizeof(*rsp))
+ return -EPROTO;
+
+ icid = le16_to_cpu(rsp->icid);
+ result = le16_to_cpu(rsp->result);
+
+ BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
+
+ if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
+ l2cap_move_continue(conn, icid, result);
+ else
+ l2cap_move_fail(conn, cmd->ident, icid, result);
+
+ return 0;
+}
+
+static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd,
+ u16 cmd_len, void *data)
+{
+ struct l2cap_move_chan_cfm *cfm = data;
+ struct l2cap_chan *chan;
+ u16 icid, result;
+
+ if (cmd_len != sizeof(*cfm))
+ return -EPROTO;
+
+ icid = le16_to_cpu(cfm->icid);
+ result = le16_to_cpu(cfm->result);
+
+ BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
+
+ chan = l2cap_get_chan_by_dcid(conn, icid);
+ if (!chan) {
+ /* Spec requires a response even if the icid was not found */
+ l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
+ return 0;
+ }
+
+ if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
+ if (result == L2CAP_MC_CONFIRMED) {
+ chan->local_amp_id = chan->move_id;
+ if (chan->local_amp_id == AMP_ID_BREDR)
+ __release_logical_link(chan);
+ } else {
+ chan->move_id = chan->local_amp_id;
+ }
+
+ l2cap_move_done(chan);
+ }
+
+ l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
+
+ l2cap_chan_unlock(chan);
+
+ return 0;
+}
+
+static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd,
+ u16 cmd_len, void *data)
+{
+ struct l2cap_move_chan_cfm_rsp *rsp = data;
+ struct l2cap_chan *chan;
+ u16 icid;
+
+ if (cmd_len != sizeof(*rsp))
+ return -EPROTO;
+
+ icid = le16_to_cpu(rsp->icid);
+
+ BT_DBG("icid 0x%4.4x", icid);
+
+ chan = l2cap_get_chan_by_scid(conn, icid);
+ if (!chan)
+ return 0;
+
+ __clear_chan_timer(chan);
+
+ if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
+ chan->local_amp_id = chan->move_id;
+
+ if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
+ __release_logical_link(chan);
+
+ l2cap_move_done(chan);
+ }
+
+ l2cap_chan_unlock(chan);
+
+ return 0;
+}
+
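+/* Sanity check LE connection parameters before they are applied. A
+ * sketch of the arithmetic, assuming the units the Core spec defines:
+ * min/max are the connection interval in 1.25 ms units, to_multiplier
+ * is the supervision timeout in 10 ms units. to_multiplier * 8 / max
+ * is therefore timeout / interval, so the final check ensures that
+ * (latency + 1) * interval does not exceed the supervision timeout.
+ * For example, max = 40 (50 ms) and to_multiplier = 100 (1 s) give
+ * max_latency = (800 / 40) - 1 = 19 skipped connection events.
+ */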
+static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
+ u16 to_multiplier)
+{
+ u16 max_latency;
+
+ if (min > max || min < 6 || max > 3200)
+ return -EINVAL;
+
+ if (to_multiplier < 10 || to_multiplier > 3200)
+ return -EINVAL;
+
+ if (max >= to_multiplier * 8)
+ return -EINVAL;
+
+ max_latency = (to_multiplier * 8 / max) - 1;
+ if (latency > 499 || latency > max_latency)
+ return -EINVAL;
+
+ return 0;
+}
+
+static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd,
+ u16 cmd_len, u8 *data)
+{
+ struct hci_conn *hcon = conn->hcon;
+ struct l2cap_conn_param_update_req *req;
+ struct l2cap_conn_param_update_rsp rsp;
+ u16 min, max, latency, to_multiplier;
+ int err;
+
+ if (!(hcon->link_mode & HCI_LM_MASTER))
+ return -EINVAL;
+
+ if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
+ return -EPROTO;
+
+ req = (struct l2cap_conn_param_update_req *) data;
+ min = __le16_to_cpu(req->min);
+ max = __le16_to_cpu(req->max);
+ latency = __le16_to_cpu(req->latency);
+ to_multiplier = __le16_to_cpu(req->to_multiplier);
+
+ BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
+ min, max, latency, to_multiplier);
+
+ memset(&rsp, 0, sizeof(rsp));
+
+ err = l2cap_check_conn_param(min, max, latency, to_multiplier);
+ if (err)
+ rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
+ else
+ rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
+
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
+ sizeof(rsp), &rsp);
+
+ if (!err)
+ hci_le_conn_update(hcon, min, max, latency, to_multiplier);
+
+ return 0;
+}
+
+static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ u8 *data)
+{
+ struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
+ u16 dcid, mtu, mps, credits, result;
+ struct l2cap_chan *chan;
+ int err;
+
+ if (cmd_len < sizeof(*rsp))
+ return -EPROTO;
+
+ dcid = __le16_to_cpu(rsp->dcid);
+ mtu = __le16_to_cpu(rsp->mtu);
+ mps = __le16_to_cpu(rsp->mps);
+ credits = __le16_to_cpu(rsp->credits);
+ result = __le16_to_cpu(rsp->result);
+
+ if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
+ return -EPROTO;
+
+ BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
+ dcid, mtu, mps, credits, result);
+
+ mutex_lock(&conn->chan_lock);
+
+ chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
+ if (!chan) {
+ err = -EBADSLT;
+ goto unlock;
+ }
+
+ err = 0;
+
+ l2cap_chan_lock(chan);
+
+ switch (result) {
+ case L2CAP_CR_SUCCESS:
+ chan->ident = 0;
+ chan->dcid = dcid;
+ chan->omtu = mtu;
+ chan->remote_mps = mps;
+ chan->tx_credits = credits;
+ l2cap_chan_ready(chan);
+ break;
+
+ default:
+ l2cap_chan_del(chan, ECONNREFUSED);
+ break;
+ }
+
+ l2cap_chan_unlock(chan);
+
+unlock:
+ mutex_unlock(&conn->chan_lock);
+
+ return err;
+}
+
+static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ u8 *data)
+{
+ int err = 0;
+
+ switch (cmd->code) {
+ case L2CAP_COMMAND_REJ:
+ l2cap_command_rej(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_CONN_REQ:
+ err = l2cap_connect_req(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_CONN_RSP:
+ case L2CAP_CREATE_CHAN_RSP:
+ l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_CONF_REQ:
+ err = l2cap_config_req(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_CONF_RSP:
+ l2cap_config_rsp(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_DISCONN_REQ:
+ err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_DISCONN_RSP:
+ l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_ECHO_REQ:
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
+ break;
+
+ case L2CAP_ECHO_RSP:
+ break;
+
+ case L2CAP_INFO_REQ:
+ err = l2cap_information_req(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_INFO_RSP:
+ l2cap_information_rsp(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_CREATE_CHAN_REQ:
+ err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_MOVE_CHAN_REQ:
+ err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_MOVE_CHAN_RSP:
+ l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_MOVE_CHAN_CFM:
+ err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_MOVE_CHAN_CFM_RSP:
+ l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
+ break;
+
+ default:
+ BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
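+/* Handle an incoming LE credit-based connection request. The 23-octet
+ * floor on MTU and MPS corresponds to the smallest values LE
+ * credit-based flow control permits a channel to advertise.
+ */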
+static int l2cap_le_connect_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ u8 *data)
+{
+ struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
+ struct l2cap_le_conn_rsp rsp;
+ struct l2cap_chan *chan, *pchan;
+ u16 dcid, scid, credits, mtu, mps;
+ __le16 psm;
+ u8 result;
+
+ if (cmd_len != sizeof(*req))
+ return -EPROTO;
+
+ scid = __le16_to_cpu(req->scid);
+ mtu = __le16_to_cpu(req->mtu);
+ mps = __le16_to_cpu(req->mps);
+ psm = req->psm;
+ dcid = 0;
+ credits = 0;
+
+ if (mtu < 23 || mps < 23)
+ return -EPROTO;
+
+ BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
+ scid, mtu, mps);
+
+ /* Check if we have socket listening on psm */
+ pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
+ &conn->hcon->dst, LE_LINK);
+ if (!pchan) {
+ result = L2CAP_CR_BAD_PSM;
+ chan = NULL;
+ goto response;
+ }
+
+ mutex_lock(&conn->chan_lock);
+ l2cap_chan_lock(pchan);
+
+ if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
+ result = L2CAP_CR_AUTHENTICATION;
+ chan = NULL;
+ goto response_unlock;
+ }
+
+ /* Check if we already have channel with that dcid */
+ if (__l2cap_get_chan_by_dcid(conn, scid)) {
+ result = L2CAP_CR_NO_MEM;
+ chan = NULL;
+ goto response_unlock;
+ }
+
+ chan = pchan->ops->new_connection(pchan);
+ if (!chan) {
+ result = L2CAP_CR_NO_MEM;
+ goto response_unlock;
+ }
+
+ l2cap_le_flowctl_init(chan);
+
+ bacpy(&chan->src, &conn->hcon->src);
+ bacpy(&chan->dst, &conn->hcon->dst);
+ chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
+ chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
+ chan->psm = psm;
+ chan->dcid = scid;
+ chan->omtu = mtu;
+ chan->remote_mps = mps;
+ chan->tx_credits = __le16_to_cpu(req->credits);
+
+ __l2cap_chan_add(conn, chan);
+ dcid = chan->scid;
+ credits = chan->rx_credits;
+
+ __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
+
+ chan->ident = cmd->ident;
+
+ if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
+ l2cap_state_change(chan, BT_CONNECT2);
+ result = L2CAP_CR_PEND;
+ chan->ops->defer(chan);
+ } else {
+ l2cap_chan_ready(chan);
+ result = L2CAP_CR_SUCCESS;
+ }
+
+response_unlock:
+ l2cap_chan_unlock(pchan);
+ mutex_unlock(&conn->chan_lock);
+
+ if (result == L2CAP_CR_PEND)
+ return 0;
+
+response:
+ if (chan) {
+ rsp.mtu = cpu_to_le16(chan->imtu);
+ rsp.mps = cpu_to_le16(chan->mps);
+ } else {
+ rsp.mtu = 0;
+ rsp.mps = 0;
+ }
+
+ rsp.dcid = cpu_to_le16(dcid);
+ rsp.credits = cpu_to_le16(credits);
+ rsp.result = cpu_to_le16(result);
+
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
+
+ return 0;
+}
+
+static inline int l2cap_le_credits(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ u8 *data)
+{
+ struct l2cap_le_credits *pkt;
+ struct l2cap_chan *chan;
+ u16 cid, credits, max_credits;
+
+ if (cmd_len != sizeof(*pkt))
+ return -EPROTO;
+
+ pkt = (struct l2cap_le_credits *) data;
+ cid = __le16_to_cpu(pkt->cid);
+ credits = __le16_to_cpu(pkt->credits);
+
+ BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
+
+ chan = l2cap_get_chan_by_dcid(conn, cid);
+ if (!chan)
+ return -EBADSLT;
+
+ max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
+ if (credits > max_credits) {
+ BT_ERR("LE credits overflow");
+ l2cap_send_disconn_req(chan, ECONNRESET);
+
+ /* Return 0 so that we don't trigger an unnecessary
+ * command reject packet.
+ */
+ return 0;
+ }
+
+ chan->tx_credits += credits;
+
+ while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
+ l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
+ chan->tx_credits--;
+ }
+
+ if (chan->tx_credits)
+ chan->ops->resume(chan);
+
+ l2cap_chan_unlock(chan);
+
+ return 0;
+}
+
+static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ u8 *data)
+{
+ struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
+ struct l2cap_chan *chan;
+
+ if (cmd_len < sizeof(*rej))
+ return -EPROTO;
+
+ mutex_lock(&conn->chan_lock);
+
+ chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
+ if (!chan)
+ goto done;
+
+ l2cap_chan_lock(chan);
+ l2cap_chan_del(chan, ECONNREFUSED);
+ l2cap_chan_unlock(chan);
+
+done:
+ mutex_unlock(&conn->chan_lock);
+ return 0;
+}
+
+static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ u8 *data)
+{
+ int err = 0;
+
+ switch (cmd->code) {
+ case L2CAP_COMMAND_REJ:
+ l2cap_le_command_rej(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_CONN_PARAM_UPDATE_REQ:
+ err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_CONN_PARAM_UPDATE_RSP:
+ break;
+
+ case L2CAP_LE_CONN_RSP:
+ l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_LE_CONN_REQ:
+ err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_LE_CREDITS:
+ err = l2cap_le_credits(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_DISCONN_REQ:
+ err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_DISCONN_RSP:
+ l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
+ break;
+
+ default:
+ BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
+ struct sk_buff *skb)
+{
+ struct hci_conn *hcon = conn->hcon;
+ struct l2cap_cmd_hdr *cmd;
+ u16 len;
+ int err;
+
+ if (hcon->type != LE_LINK)
+ goto drop;
+
+ if (skb->len < L2CAP_CMD_HDR_SIZE)
+ goto drop;
+
+ cmd = (void *) skb->data;
+ skb_pull(skb, L2CAP_CMD_HDR_SIZE);
+
+ len = le16_to_cpu(cmd->len);
+
+ BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
+
+ if (len != skb->len || !cmd->ident) {
+ BT_DBG("corrupted command");
+ goto drop;
+ }
+
+ err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
+ if (err) {
+ struct l2cap_cmd_rej_unk rej;
+
+ BT_ERR("Wrong link type (%d)", err);
+
+ rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
+ sizeof(rej), &rej);
+ }
+
+drop:
+ kfree_skb(skb);
+}
+
+static inline void l2cap_sig_channel(struct l2cap_conn *conn,
+ struct sk_buff *skb)
+{
+ struct hci_conn *hcon = conn->hcon;
+ u8 *data = skb->data;
+ int len = skb->len;
+ struct l2cap_cmd_hdr cmd;
+ int err;
+
+ l2cap_raw_recv(conn, skb);
+
+ if (hcon->type != ACL_LINK)
+ goto drop;
+
+ while (len >= L2CAP_CMD_HDR_SIZE) {
+ u16 cmd_len;
+ memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
+ data += L2CAP_CMD_HDR_SIZE;
+ len -= L2CAP_CMD_HDR_SIZE;
+
+ cmd_len = le16_to_cpu(cmd.len);
+
+ BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
+ cmd.ident);
+
+ if (cmd_len > len || !cmd.ident) {
+ BT_DBG("corrupted command");
+ break;
+ }
+
+ err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
+ if (err) {
+ struct l2cap_cmd_rej_unk rej;
+
+ BT_ERR("Wrong link type (%d)", err);
+
+ rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
+ l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
+ sizeof(rej), &rej);
+ }
+
+ data += cmd_len;
+ len -= cmd_len;
+ }
+
+drop:
+ kfree_skb(skb);
+}
+
+static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
+{
+ u16 our_fcs, rcv_fcs;
+ int hdr_size;
+
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ hdr_size = L2CAP_EXT_HDR_SIZE;
+ else
+ hdr_size = L2CAP_ENH_HDR_SIZE;
+
+ if (chan->fcs == L2CAP_FCS_CRC16) {
+ skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
+ rcv_fcs = get_unaligned_le16(skb->data + skb->len);
+ our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
+
+ if (our_fcs != rcv_fcs)
+ return -EBADMSG;
+ }
+ return 0;
+}
+
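+/* Answer a poll (P=1) from the remote side with a frame carrying F=1:
+ * an RNR if we are locally busy, otherwise pending I-frames if any,
+ * falling back to an RR when no I-frame ended up carrying the F-bit.
+ */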
+static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
+{
+ struct l2cap_ctrl control;
+
+ BT_DBG("chan %p", chan);
+
+ memset(&control, 0, sizeof(control));
+ control.sframe = 1;
+ control.final = 1;
+ control.reqseq = chan->buffer_seq;
+ set_bit(CONN_SEND_FBIT, &chan->conn_state);
+
+ if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
+ control.super = L2CAP_SUPER_RNR;
+ l2cap_send_sframe(chan, &control);
+ }
+
+ if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
+ chan->unacked_frames > 0)
+ __set_retrans_timer(chan);
+
+ /* Send pending iframes */
+ l2cap_ertm_send(chan);
+
+ if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
+ test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
+ /* F-bit wasn't sent in an s-frame or i-frame yet, so
+ * send it now.
+ */
+ control.super = L2CAP_SUPER_RR;
+ l2cap_send_sframe(chan, &control);
+ }
+}
+
+static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
+ struct sk_buff **last_frag)
+{
+ /* skb->len reflects the data in the skb as well as all fragments;
+ * skb->data_len reflects only the data held in fragments.
+ */
+ if (!skb_has_frag_list(skb))
+ skb_shinfo(skb)->frag_list = new_frag;
+
+ new_frag->next = NULL;
+
+ (*last_frag)->next = new_frag;
+ *last_frag = new_frag;
+
+ skb->len += new_frag->len;
+ skb->data_len += new_frag->len;
+ skb->truesize += new_frag->truesize;
+}
+
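+/* Reassemble an ERTM/streaming SDU from its SAR fragments. Unsegmented
+ * frames are delivered directly. A start fragment carries the total
+ * SDU length and opens chan->sdu, continuation fragments are appended,
+ * and the end fragment must make the reassembled length match exactly
+ * before the SDU is handed up through chan->ops->recv().
+ */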
+static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
+ struct l2cap_ctrl *control)
+{
+ int err = -EINVAL;
+
+ switch (control->sar) {
+ case L2CAP_SAR_UNSEGMENTED:
+ if (chan->sdu)
+ break;
+
+ err = chan->ops->recv(chan, skb);
+ break;
+
+ case L2CAP_SAR_START:
+ if (chan->sdu)
+ break;
+
+ chan->sdu_len = get_unaligned_le16(skb->data);
+ skb_pull(skb, L2CAP_SDULEN_SIZE);
+
+ if (chan->sdu_len > chan->imtu) {
+ err = -EMSGSIZE;
+ break;
+ }
+
+ if (skb->len >= chan->sdu_len)
+ break;
+
+ chan->sdu = skb;
+ chan->sdu_last_frag = skb;
+
+ skb = NULL;
+ err = 0;
+ break;
+
+ case L2CAP_SAR_CONTINUE:
+ if (!chan->sdu)
+ break;
+
+ append_skb_frag(chan->sdu, skb,
+ &chan->sdu_last_frag);
+ skb = NULL;
+
+ if (chan->sdu->len >= chan->sdu_len)
+ break;
+
+ err = 0;
+ break;
+
+ case L2CAP_SAR_END:
+ if (!chan->sdu)
+ break;
+
+ append_skb_frag(chan->sdu, skb,
+ &chan->sdu_last_frag);
+ skb = NULL;
+
+ if (chan->sdu->len != chan->sdu_len)
+ break;
+
+ err = chan->ops->recv(chan, chan->sdu);
+
+ if (!err) {
+ /* Reassembly complete */
+ chan->sdu = NULL;
+ chan->sdu_last_frag = NULL;
+ chan->sdu_len = 0;
+ }
+ break;
+ }
+
+ if (err) {
+ kfree_skb(skb);
+ kfree_skb(chan->sdu);
+ chan->sdu = NULL;
+ chan->sdu_last_frag = NULL;
+ chan->sdu_len = 0;
+ }
+
+ return err;
+}
+
+static int l2cap_resegment(struct l2cap_chan *chan)
+{
+ /* Placeholder */
+ return 0;
+}
+
+void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
+{
+ u8 event;
+
+ if (chan->mode != L2CAP_MODE_ERTM)
+ return;
+
+ event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
+ l2cap_tx(chan, NULL, NULL, event);
+}
+
+static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
+{
+ int err = 0;
+ /* Pass sequential frames to l2cap_reassemble_sdu()
+ * until a gap is encountered.
+ */
+
+ BT_DBG("chan %p", chan);
+
+ while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
+ struct sk_buff *skb;
+ BT_DBG("Searching for skb with txseq %d (queue len %d)",
+ chan->buffer_seq, skb_queue_len(&chan->srej_q));
+
+ skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
+
+ if (!skb)
+ break;
+
+ skb_unlink(skb, &chan->srej_q);
+ chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
+ err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
+ if (err)
+ break;
+ }
+
+ if (skb_queue_empty(&chan->srej_q)) {
+ chan->rx_state = L2CAP_RX_STATE_RECV;
+ l2cap_send_ack(chan);
+ }
+
+ return err;
+}
+
+static void l2cap_handle_srej(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
+{
+ struct sk_buff *skb;
+
+ BT_DBG("chan %p, control %p", chan, control);
+
+ if (control->reqseq == chan->next_tx_seq) {
+ BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ return;
+ }
+
+ skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
+
+ if (skb == NULL) {
+ BT_DBG("Seq %d not available for retransmission",
+ control->reqseq);
+ return;
+ }
+
+ if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
+ BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ return;
+ }
+
+ clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
+
+ if (control->poll) {
+ l2cap_pass_to_tx(chan, control);
+
+ set_bit(CONN_SEND_FBIT, &chan->conn_state);
+ l2cap_retransmit(chan, control);
+ l2cap_ertm_send(chan);
+
+ if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
+ set_bit(CONN_SREJ_ACT, &chan->conn_state);
+ chan->srej_save_reqseq = control->reqseq;
+ }
+ } else {
+ l2cap_pass_to_tx_fbit(chan, control);
+
+ if (control->final) {
+ if (chan->srej_save_reqseq != control->reqseq ||
+ !test_and_clear_bit(CONN_SREJ_ACT,
+ &chan->conn_state))
+ l2cap_retransmit(chan, control);
+ } else {
+ l2cap_retransmit(chan, control);
+ if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
+ set_bit(CONN_SREJ_ACT, &chan->conn_state);
+ chan->srej_save_reqseq = control->reqseq;
+ }
+ }
+ }
+}
+
+static void l2cap_handle_rej(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
+{
+ struct sk_buff *skb;
+
+ BT_DBG("chan %p, control %p", chan, control);
+
+ if (control->reqseq == chan->next_tx_seq) {
+ BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ return;
+ }
+
+ skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
+
+ if (chan->max_tx && skb &&
+ bt_cb(skb)->control.retries >= chan->max_tx) {
+ BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ return;
+ }
+
+ clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
+
+ l2cap_pass_to_tx(chan, control);
+
+ if (control->final) {
+ if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
+ l2cap_retransmit_all(chan, control);
+ } else {
+ l2cap_retransmit_all(chan, control);
+ l2cap_ertm_send(chan);
+ if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
+ set_bit(CONN_REJ_ACT, &chan->conn_state);
+ }
+}
+
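+/* Classify an incoming I-frame sequence number relative to the receive
+ * window. The result decides whether the frame is the next expected
+ * one, a duplicate, the answer to one of our SREJs, a gap that needs
+ * new SREJs, or invalid. All comparisons go through __seq_offset() so
+ * they remain correct across sequence number wrap-around.
+ */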
+static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
+{
+ BT_DBG("chan %p, txseq %d", chan, txseq);
+
+ BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
+ chan->expected_tx_seq);
+
+ if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
+ if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
+ chan->tx_win) {
+ /* See notes below regarding "double poll" and
+ * invalid packets.
+ */
+ if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
+ BT_DBG("Invalid/Ignore - after SREJ");
+ return L2CAP_TXSEQ_INVALID_IGNORE;
+ } else {
+ BT_DBG("Invalid - in window after SREJ sent");
+ return L2CAP_TXSEQ_INVALID;
+ }
+ }
+
+ if (chan->srej_list.head == txseq) {
+ BT_DBG("Expected SREJ");
+ return L2CAP_TXSEQ_EXPECTED_SREJ;
+ }
+
+ if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
+ BT_DBG("Duplicate SREJ - txseq already stored");
+ return L2CAP_TXSEQ_DUPLICATE_SREJ;
+ }
+
+ if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
+ BT_DBG("Unexpected SREJ - not requested");
+ return L2CAP_TXSEQ_UNEXPECTED_SREJ;
+ }
+ }
+
+ if (chan->expected_tx_seq == txseq) {
+ if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
+ chan->tx_win) {
+ BT_DBG("Invalid - txseq outside tx window");
+ return L2CAP_TXSEQ_INVALID;
+ } else {
+ BT_DBG("Expected");
+ return L2CAP_TXSEQ_EXPECTED;
+ }
+ }
+
+ if (__seq_offset(chan, txseq, chan->last_acked_seq) <
+ __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
+ BT_DBG("Duplicate - expected_tx_seq later than txseq");
+ return L2CAP_TXSEQ_DUPLICATE;
+ }
+
+ if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
+ /* A source of invalid packets is a "double poll" condition,
+ * where delays cause us to send multiple poll packets. If
+ * the remote stack receives and processes both polls,
+ * sequence numbers can wrap around in such a way that a
+ * resent frame has a sequence number that looks like new data
+ * with a sequence gap. This would trigger an erroneous SREJ
+ * request.
+ *
+ * Fortunately, such wrap-around cannot occur when the tx
+ * window is at most half of the maximum sequence number, so
+ * out-of-window frames can be safely ignored.
+ *
+ * With a tx window larger than half of the maximum, an
+ * out-of-window frame cannot be told apart from new data,
+ * so it is treated as invalid and causes a disconnect.
+ */
+
+ if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
+ BT_DBG("Invalid/Ignore - txseq outside tx window");
+ return L2CAP_TXSEQ_INVALID_IGNORE;
+ } else {
+ BT_DBG("Invalid - txseq outside tx window");
+ return L2CAP_TXSEQ_INVALID;
+ }
+ } else {
+ BT_DBG("Unexpected - txseq indicates missing frames");
+ return L2CAP_TXSEQ_UNEXPECTED;
+ }
+}
+
+static int l2cap_rx_state_recv(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control,
+ struct sk_buff *skb, u8 event)
+{
+ int err = 0;
+ bool skb_in_use = false;
+
+ BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
+ event);
+
+ switch (event) {
+ case L2CAP_EV_RECV_IFRAME:
+ switch (l2cap_classify_txseq(chan, control->txseq)) {
+ case L2CAP_TXSEQ_EXPECTED:
+ l2cap_pass_to_tx(chan, control);
+
+ if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
+ BT_DBG("Busy, discarding expected seq %d",
+ control->txseq);
+ break;
+ }
+
+ chan->expected_tx_seq = __next_seq(chan,
+ control->txseq);
+
+ chan->buffer_seq = chan->expected_tx_seq;
+ skb_in_use = true;
+
+ err = l2cap_reassemble_sdu(chan, skb, control);
+ if (err)
+ break;
+
+ if (control->final) {
+ if (!test_and_clear_bit(CONN_REJ_ACT,
+ &chan->conn_state)) {
+ control->final = 0;
+ l2cap_retransmit_all(chan, control);
+ l2cap_ertm_send(chan);
+ }
+ }
+
+ if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
+ l2cap_send_ack(chan);
+ break;
+ case L2CAP_TXSEQ_UNEXPECTED:
+ l2cap_pass_to_tx(chan, control);
+
+ /* Can't issue SREJ frames in the local busy state.
+ * Drop this frame; it will be seen as missing
+ * when local busy is exited.
+ */
+ if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
+ BT_DBG("Busy, discarding unexpected seq %d",
+ control->txseq);
+ break;
+ }
+
+ /* There was a gap in the sequence, so an SREJ
+ * must be sent for each missing frame. The
+ * current frame is stored for later use.
+ */
+ skb_queue_tail(&chan->srej_q, skb);
+ skb_in_use = true;
+ BT_DBG("Queued %p (queue len %d)", skb,
+ skb_queue_len(&chan->srej_q));
+
+ clear_bit(CONN_SREJ_ACT, &chan->conn_state);
+ l2cap_seq_list_clear(&chan->srej_list);
+ l2cap_send_srej(chan, control->txseq);
+
+ chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
+ break;
+ case L2CAP_TXSEQ_DUPLICATE:
+ l2cap_pass_to_tx(chan, control);
+ break;
+ case L2CAP_TXSEQ_INVALID_IGNORE:
+ break;
+ case L2CAP_TXSEQ_INVALID:
+ default:
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ break;
+ }
+ break;
+ case L2CAP_EV_RECV_RR:
+ l2cap_pass_to_tx(chan, control);
+ if (control->final) {
+ clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
+
+ if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
+ !__chan_is_moving(chan)) {
+ control->final = 0;
+ l2cap_retransmit_all(chan, control);
+ }
+
+ l2cap_ertm_send(chan);
+ } else if (control->poll) {
+ l2cap_send_i_or_rr_or_rnr(chan);
+ } else {
+ if (test_and_clear_bit(CONN_REMOTE_BUSY,
+ &chan->conn_state) &&
+ chan->unacked_frames)
+ __set_retrans_timer(chan);
+
+ l2cap_ertm_send(chan);
+ }
+ break;
+ case L2CAP_EV_RECV_RNR:
+ set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
+ l2cap_pass_to_tx(chan, control);
+ if (control && control->poll) {
+ set_bit(CONN_SEND_FBIT, &chan->conn_state);
+ l2cap_send_rr_or_rnr(chan, 0);
+ }
+ __clear_retrans_timer(chan);
+ l2cap_seq_list_clear(&chan->retrans_list);
+ break;
+ case L2CAP_EV_RECV_REJ:
+ l2cap_handle_rej(chan, control);
+ break;
+ case L2CAP_EV_RECV_SREJ:
+ l2cap_handle_srej(chan, control);
+ break;
+ default:
+ break;
+ }
+
+ if (skb && !skb_in_use) {
+ BT_DBG("Freeing %p", skb);
+ kfree_skb(skb);
+ }
+
+ return err;
+}
+
+static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control,
+ struct sk_buff *skb, u8 event)
+{
+ int err = 0;
+ u16 txseq = control->txseq;
+ bool skb_in_use = false;
+
+ BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
+ event);
+
+ switch (event) {
+ case L2CAP_EV_RECV_IFRAME:
+ switch (l2cap_classify_txseq(chan, txseq)) {
+ case L2CAP_TXSEQ_EXPECTED:
+ /* Keep frame for reassembly later */
+ l2cap_pass_to_tx(chan, control);
+ skb_queue_tail(&chan->srej_q, skb);
+ skb_in_use = true;
+ BT_DBG("Queued %p (queue len %d)", skb,
+ skb_queue_len(&chan->srej_q));
+
+ chan->expected_tx_seq = __next_seq(chan, txseq);
+ break;
+ case L2CAP_TXSEQ_EXPECTED_SREJ:
+ l2cap_seq_list_pop(&chan->srej_list);
+
+ l2cap_pass_to_tx(chan, control);
+ skb_queue_tail(&chan->srej_q, skb);
+ skb_in_use = true;
+ BT_DBG("Queued %p (queue len %d)", skb,
+ skb_queue_len(&chan->srej_q));
+
+ err = l2cap_rx_queued_iframes(chan);
+ if (err)
+ break;
+
+ break;
+ case L2CAP_TXSEQ_UNEXPECTED:
+ /* Got a frame that can't be reassembled yet.
+ * Save it for later, and send SREJs to cover
+ * the missing frames.
+ */
+ skb_queue_tail(&chan->srej_q, skb);
+ skb_in_use = true;
+ BT_DBG("Queued %p (queue len %d)", skb,
+ skb_queue_len(&chan->srej_q));
+
+ l2cap_pass_to_tx(chan, control);
+ l2cap_send_srej(chan, control->txseq);
+ break;
+ case L2CAP_TXSEQ_UNEXPECTED_SREJ:
+ /* This frame was requested with an SREJ, but
+ * some expected retransmitted frames are
+ * missing. Request retransmission of missing
+ * SREJ'd frames.
+ */
+ skb_queue_tail(&chan->srej_q, skb);
+ skb_in_use = true;
+ BT_DBG("Queued %p (queue len %d)", skb,
+ skb_queue_len(&chan->srej_q));
+
+ l2cap_pass_to_tx(chan, control);
+ l2cap_send_srej_list(chan, control->txseq);
+ break;
+ case L2CAP_TXSEQ_DUPLICATE_SREJ:
+ /* We've already queued this frame. Drop this copy. */
+ l2cap_pass_to_tx(chan, control);
+ break;
+ case L2CAP_TXSEQ_DUPLICATE:
+ /* Expecting a later sequence number, so this frame
+ * was already received. Ignore it completely.
+ */
+ break;
+ case L2CAP_TXSEQ_INVALID_IGNORE:
+ break;
+ case L2CAP_TXSEQ_INVALID:
+ default:
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ break;
+ }
+ break;
+ case L2CAP_EV_RECV_RR:
+ l2cap_pass_to_tx(chan, control);
+ if (control->final) {
+ clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
+
+ if (!test_and_clear_bit(CONN_REJ_ACT,
+ &chan->conn_state)) {
+ control->final = 0;
+ l2cap_retransmit_all(chan, control);
+ }
+
+ l2cap_ertm_send(chan);
+ } else if (control->poll) {
+ if (test_and_clear_bit(CONN_REMOTE_BUSY,
+ &chan->conn_state) &&
+ chan->unacked_frames) {
+ __set_retrans_timer(chan);
+ }
+
+ set_bit(CONN_SEND_FBIT, &chan->conn_state);
+ l2cap_send_srej_tail(chan);
+ } else {
+ if (test_and_clear_bit(CONN_REMOTE_BUSY,
+ &chan->conn_state) &&
+ chan->unacked_frames)
+ __set_retrans_timer(chan);
+
+ l2cap_send_ack(chan);
+ }
+ break;
+ case L2CAP_EV_RECV_RNR:
+ set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
+ l2cap_pass_to_tx(chan, control);
+ if (control->poll) {
+ l2cap_send_srej_tail(chan);
+ } else {
+ struct l2cap_ctrl rr_control;
+ memset(&rr_control, 0, sizeof(rr_control));
+ rr_control.sframe = 1;
+ rr_control.super = L2CAP_SUPER_RR;
+ rr_control.reqseq = chan->buffer_seq;
+ l2cap_send_sframe(chan, &rr_control);
+ }
+
+ break;
+ case L2CAP_EV_RECV_REJ:
+ l2cap_handle_rej(chan, control);
+ break;
+ case L2CAP_EV_RECV_SREJ:
+ l2cap_handle_srej(chan, control);
+ break;
+ }
+
+ if (skb && !skb_in_use) {
+ BT_DBG("Freeing %p", skb);
+ kfree_skb(skb);
+ }
+
+ return err;
+}
+
+static int l2cap_finish_move(struct l2cap_chan *chan)
+{
+ BT_DBG("chan %p", chan);
+
+ chan->rx_state = L2CAP_RX_STATE_RECV;
+
+ if (chan->hs_hcon)
+ chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
+ else
+ chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
+
+ return l2cap_resegment(chan);
+}
+
+static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control,
+ struct sk_buff *skb, u8 event)
+{
+ int err;
+
+ BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
+ event);
+
+ if (!control->poll)
+ return -EPROTO;
+
+ l2cap_process_reqseq(chan, control->reqseq);
+
+ if (!skb_queue_empty(&chan->tx_q))
+ chan->tx_send_head = skb_peek(&chan->tx_q);
+ else
+ chan->tx_send_head = NULL;
+
+ /* Rewind next_tx_seq to the point expected
+ * by the receiver.
+ */
+ chan->next_tx_seq = control->reqseq;
+ chan->unacked_frames = 0;
+
+ err = l2cap_finish_move(chan);
+ if (err)
+ return err;
+
+ set_bit(CONN_SEND_FBIT, &chan->conn_state);
+ l2cap_send_i_or_rr_or_rnr(chan);
+
+ if (event == L2CAP_EV_RECV_IFRAME)
+ return -EPROTO;
+
+ return l2cap_rx_state_recv(chan, control, NULL, event);
+}
+
+static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control,
+ struct sk_buff *skb, u8 event)
+{
+ int err;
+
+ if (!control->final)
+ return -EPROTO;
+
+ clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
+
+ chan->rx_state = L2CAP_RX_STATE_RECV;
+ l2cap_process_reqseq(chan, control->reqseq);
+
+ if (!skb_queue_empty(&chan->tx_q))
+ chan->tx_send_head = skb_peek(&chan->tx_q);
+ else
+ chan->tx_send_head = NULL;
+
+ /* Rewind next_tx_seq to the point expected
+ * by the receiver.
+ */
+ chan->next_tx_seq = control->reqseq;
+ chan->unacked_frames = 0;
+
+ if (chan->hs_hcon)
+ chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
+ else
+ chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
+
+ err = l2cap_resegment(chan);
+
+ if (!err)
+ err = l2cap_rx_state_recv(chan, control, skb, event);
+
+ return err;
+}
+
+static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
+{
+ /* Make sure reqseq is for a packet that has been sent but not acked */
+ u16 unacked;
+
+ unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
+ return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
+}
+
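+/* Main ERTM receive state machine. A reqseq that does not acknowledge
+ * sent-but-unacked data indicates a broken peer and forces a
+ * disconnect; valid events are dispatched to the handler for the
+ * current rx_state.
+ */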
+static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+ struct sk_buff *skb, u8 event)
+{
+ int err = 0;
+
+ BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
+ control, skb, event, chan->rx_state);
+
+ if (__valid_reqseq(chan, control->reqseq)) {
+ switch (chan->rx_state) {
+ case L2CAP_RX_STATE_RECV:
+ err = l2cap_rx_state_recv(chan, control, skb, event);
+ break;
+ case L2CAP_RX_STATE_SREJ_SENT:
+ err = l2cap_rx_state_srej_sent(chan, control, skb,
+ event);
+ break;
+ case L2CAP_RX_STATE_WAIT_P:
+ err = l2cap_rx_state_wait_p(chan, control, skb, event);
+ break;
+ case L2CAP_RX_STATE_WAIT_F:
+ err = l2cap_rx_state_wait_f(chan, control, skb, event);
+ break;
+ default:
+ /* shut it down */
+ break;
+ }
+ } else {
+ BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
+ control->reqseq, chan->next_tx_seq,
+ chan->expected_ack_seq);
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ }
+
+ return err;
+}
+
+static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+ struct sk_buff *skb)
+{
+ int err = 0;
+
+ BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
+ chan->rx_state);
+
+ if (l2cap_classify_txseq(chan, control->txseq) ==
+ L2CAP_TXSEQ_EXPECTED) {
+ l2cap_pass_to_tx(chan, control);
+
+ BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
+ __next_seq(chan, chan->buffer_seq));
+
+ chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
+
+ l2cap_reassemble_sdu(chan, skb, control);
+ } else {
+ if (chan->sdu) {
+ kfree_skb(chan->sdu);
+ chan->sdu = NULL;
+ }
+ chan->sdu_last_frag = NULL;
+ chan->sdu_len = 0;
+
+ if (skb) {
+ BT_DBG("Freeing %p", skb);
+ kfree_skb(skb);
+ }
+ }
+
+ chan->last_acked_seq = control->txseq;
+ chan->expected_tx_seq = __next_seq(chan, control->txseq);
+
+ return err;
+}
+
+static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
+{
+ struct l2cap_ctrl *control = &bt_cb(skb)->control;
+ u16 len;
+ u8 event;
+
+ __unpack_control(chan, skb);
+
+ len = skb->len;
+
+ /*
+ * A corrupted I-frame can simply be dropped here: the
+ * receive state machine will see it as a missing frame,
+ * start the normal recovery procedure and request
+ * retransmission from the remote side.
+ */
+ if (l2cap_check_fcs(chan, skb))
+ goto drop;
+
+ if (!control->sframe && control->sar == L2CAP_SAR_START)
+ len -= L2CAP_SDULEN_SIZE;
+
+ if (chan->fcs == L2CAP_FCS_CRC16)
+ len -= L2CAP_FCS_SIZE;
+
+ if (len > chan->mps) {
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ goto drop;
+ }
+
+ if (!control->sframe) {
+ int err;
+
+ BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
+ control->sar, control->reqseq, control->final,
+ control->txseq);
+
+ /* Validate F-bit - F=0 always valid, F=1 only
+ * valid in TX WAIT_F
+ */
+ if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
+ goto drop;
+
+ if (chan->mode != L2CAP_MODE_STREAMING) {
+ event = L2CAP_EV_RECV_IFRAME;
+ err = l2cap_rx(chan, control, skb, event);
+ } else {
+ err = l2cap_stream_rx(chan, control, skb);
+ }
+
+ if (err)
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ } else {
+ const u8 rx_func_to_event[4] = {
+ L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
+ L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
+ };
+
+ /* Only I-frames are expected in streaming mode */
+ if (chan->mode == L2CAP_MODE_STREAMING)
+ goto drop;
+
+ BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
+ control->reqseq, control->final, control->poll,
+ control->super);
+
+ if (len != 0) {
+ BT_ERR("Trailing bytes: %d in sframe", len);
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ goto drop;
+ }
+
+ /* Validate F and P bits */
+ if (control->final && (control->poll ||
+ chan->tx_state != L2CAP_TX_STATE_WAIT_F))
+ goto drop;
+
+ event = rx_func_to_event[control->super];
+ if (l2cap_rx(chan, control, skb, event))
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ }
+
+ return 0;
+
+drop:
+ kfree_skb(skb);
+ return 0;
+}
+
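+/* Top up the peer's credits for LE credit-based flow control. A worked
+ * example, assuming the default le_max_credits of 10: nothing is sent
+ * while rx_credits is still 5 or more; once it drops below that, the
+ * peer is granted (10 - rx_credits) credits, restoring the full
+ * allowance of 10.
+ */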
+static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
+{
+ struct l2cap_conn *conn = chan->conn;
+ struct l2cap_le_credits pkt;
+ u16 return_credits;
+
+ /* We return more credits to the sender only after the number of
+ * remaining credits falls below half of the initial amount.
+ */
+ if (chan->rx_credits >= (le_max_credits + 1) / 2)
+ return;
+
+ return_credits = le_max_credits - chan->rx_credits;
+
+ BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
+
+ chan->rx_credits += return_credits;
+
+ pkt.cid = cpu_to_le16(chan->scid);
+ pkt.credits = cpu_to_le16(return_credits);
+
+ chan->ident = l2cap_get_ident(conn);
+
+ l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
+}
+
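+/* Receive one LE credit-based PDU. The first PDU of an SDU starts with
+ * a 2-byte SDU length; if more data is expected the skb is parked in
+ * chan->sdu and subsequent PDUs are appended until sdu_len bytes have
+ * arrived. Every PDU received consumes one of our rx credits.
+ */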
+static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
+{
+ int err;
+
+ if (!chan->rx_credits) {
+ BT_ERR("No credits to receive LE L2CAP data");
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ return -ENOBUFS;
+ }
+
+ if (chan->imtu < skb->len) {
+ BT_ERR("Too big LE L2CAP PDU");
+ return -ENOBUFS;
+ }
+
+ chan->rx_credits--;
+ BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
+
+ l2cap_chan_le_send_credits(chan);
+
+ err = 0;
+
+ if (!chan->sdu) {
+ u16 sdu_len;
+
+ sdu_len = get_unaligned_le16(skb->data);
+ skb_pull(skb, L2CAP_SDULEN_SIZE);
+
+ BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
+ sdu_len, skb->len, chan->imtu);
+
+ if (sdu_len > chan->imtu) {
+ BT_ERR("Too big LE L2CAP SDU length received");
+ err = -EMSGSIZE;
+ goto failed;
+ }
+
+ if (skb->len > sdu_len) {
+ BT_ERR("Too much LE L2CAP data received");
+ err = -EINVAL;
+ goto failed;
+ }
+
+ if (skb->len == sdu_len)
+ return chan->ops->recv(chan, skb);
+
+ chan->sdu = skb;
+ chan->sdu_len = sdu_len;
+ chan->sdu_last_frag = skb;
+
+ return 0;
+ }
+
+ BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
+ chan->sdu->len, skb->len, chan->sdu_len);
+
+ if (chan->sdu->len + skb->len > chan->sdu_len) {
+ BT_ERR("Too much LE L2CAP data received");
+ err = -EINVAL;
+ goto failed;
+ }
+
+ append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
+ skb = NULL;
+
+ if (chan->sdu->len == chan->sdu_len) {
+ err = chan->ops->recv(chan, chan->sdu);
+ if (!err) {
+ chan->sdu = NULL;
+ chan->sdu_last_frag = NULL;
+ chan->sdu_len = 0;
+ }
+ }
+
+failed:
+ if (err) {
+ kfree_skb(skb);
+ kfree_skb(chan->sdu);
+ chan->sdu = NULL;
+ chan->sdu_last_frag = NULL;
+ chan->sdu_len = 0;
+ }
+
+ /* We can't return an error here since we took care of the skb
+ * freeing internally. An error return would cause the caller to
+ * do a double-free of the skb.
+ */
+ return 0;
+}
+
+static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
+ struct sk_buff *skb)
+{
+ struct l2cap_chan *chan;
+
+ chan = l2cap_get_chan_by_scid(conn, cid);
+ if (!chan) {
+ if (cid == L2CAP_CID_A2MP) {
+ chan = a2mp_channel_create(conn, skb);
+ if (!chan) {
+ kfree_skb(skb);
+ return;
+ }
+
+ l2cap_chan_lock(chan);
+ } else {
+ BT_DBG("unknown cid 0x%4.4x", cid);
+ /* Drop packet and return */
+ kfree_skb(skb);
+ return;
+ }
+ }
+
+ BT_DBG("chan %p, len %d", chan, skb->len);
+
+ if (chan->state != BT_CONNECTED)
+ goto drop;
+
+ switch (chan->mode) {
+ case L2CAP_MODE_LE_FLOWCTL:
+ if (l2cap_le_data_rcv(chan, skb) < 0)
+ goto drop;
+
+ goto done;
+
+ case L2CAP_MODE_BASIC:
+ /* If the socket receive buffer overflows, we drop data here,
+ * which is *bad* because L2CAP has to be reliable.
+ * But we don't have any other choice: basic mode L2CAP
+ * doesn't provide a flow control mechanism.
+ */
+
+ if (chan->imtu < skb->len) {
+ BT_ERR("Dropping L2CAP data: receive buffer overflow");
+ goto drop;
+ }
+
+ if (!chan->ops->recv(chan, skb))
+ goto done;
+ break;
+
+ case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
+ l2cap_data_rcv(chan, skb);
+ goto done;
+
+ default:
+ BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
+ break;
+ }
+
+drop:
+ kfree_skb(skb);
+
+done:
+ l2cap_chan_unlock(chan);
+}
+
+static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
+ struct sk_buff *skb)
+{
+ struct hci_conn *hcon = conn->hcon;
+ struct l2cap_chan *chan;
+
+ if (hcon->type != ACL_LINK)
+ goto drop;
+
+ chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
+ ACL_LINK);
+ if (!chan)
+ goto drop;
+
+ BT_DBG("chan %p, len %d", chan, skb->len);
+
+ if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
+ goto drop;
+
+ if (chan->imtu < skb->len)
+ goto drop;
+
+ /* Store remote BD_ADDR and PSM for msg_name */
+ bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
+ bt_cb(skb)->psm = psm;
+
+ if (!chan->ops->recv(chan, skb))
+ return;
+
+drop:
+ kfree_skb(skb);
+}
+
+static void l2cap_att_channel(struct l2cap_conn *conn,
+ struct sk_buff *skb)
+{
+ struct hci_conn *hcon = conn->hcon;
+ struct l2cap_chan *chan;
+
+ if (hcon->type != LE_LINK)
+ goto drop;
+
+ chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
+ &hcon->src, &hcon->dst);
+ if (!chan)
+ goto drop;
+
+ BT_DBG("chan %p, len %d", chan, skb->len);
+
+ if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
+ goto drop;
+
+ if (chan->imtu < skb->len)
+ goto drop;
+
+ if (!chan->ops->recv(chan, skb))
+ return;
+
+drop:
+ kfree_skb(skb);
+}
+
+static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+ struct l2cap_hdr *lh = (void *) skb->data;
+ struct hci_conn *hcon = conn->hcon;
+ u16 cid, len;
+ __le16 psm;
+
+ if (hcon->state != BT_CONNECTED) {
+ BT_DBG("queueing pending rx skb");
+ skb_queue_tail(&conn->pending_rx, skb);
+ return;
+ }
+
+ skb_pull(skb, L2CAP_HDR_SIZE);
+ cid = __le16_to_cpu(lh->cid);
+ len = __le16_to_cpu(lh->len);
+
+ if (len != skb->len) {
+ kfree_skb(skb);
+ return;
+ }
+
+ BT_DBG("len %d, cid 0x%4.4x", len, cid);
+
+ switch (cid) {
+ case L2CAP_CID_SIGNALING:
+ l2cap_sig_channel(conn, skb);
+ break;
+
+ case L2CAP_CID_CONN_LESS:
+ psm = get_unaligned((__le16 *) skb->data);
+ skb_pull(skb, L2CAP_PSMLEN_SIZE);
+ l2cap_conless_channel(conn, psm, skb);
+ break;
+
+ case L2CAP_CID_ATT:
+ l2cap_att_channel(conn, skb);
+ break;
+
+ case L2CAP_CID_LE_SIGNALING:
+ l2cap_le_sig_channel(conn, skb);
+ break;
+
+ case L2CAP_CID_SMP:
+ if (smp_sig_channel(conn, skb))
+ l2cap_conn_del(conn->hcon, EACCES);
+ break;
+
+ case L2CAP_FC_6LOWPAN:
+ bt_6lowpan_recv(conn, skb);
+ break;
+
+ default:
+ l2cap_data_channel(conn, cid, skb);
+ break;
+ }
+}
+
+static void process_pending_rx(struct work_struct *work)
+{
+ struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
+ pending_rx_work);
+ struct sk_buff *skb;
+
+ BT_DBG("");
+
+ while ((skb = skb_dequeue(&conn->pending_rx)))
+ l2cap_recv_frame(conn, skb);
+}
+
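+/* Allocate and initialize the per-link l2cap_conn for an hcon, setting
+ * up the HCI channel, the link MTU (LE vs. ACL), the info/security
+ * timer and the deferred rx queue. If a conn is already attached it is
+ * simply returned.
+ */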
+static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
+{
+ struct l2cap_conn *conn = hcon->l2cap_data;
+ struct hci_chan *hchan;
+
+ if (conn)
+ return conn;
+
+ hchan = hci_chan_create(hcon);
+ if (!hchan)
+ return NULL;
+
+ conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
+ if (!conn) {
+ hci_chan_del(hchan);
+ return NULL;
+ }
+
+ kref_init(&conn->ref);
+ hcon->l2cap_data = conn;
+ conn->hcon = hcon;
+ hci_conn_get(conn->hcon);
+ conn->hchan = hchan;
+
+ BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
+
+ switch (hcon->type) {
+ case LE_LINK:
+ if (hcon->hdev->le_mtu) {
+ conn->mtu = hcon->hdev->le_mtu;
+ break;
+ }
+ /* fall through */
+ default:
+ conn->mtu = hcon->hdev->acl_mtu;
+ break;
+ }
+
+ conn->feat_mask = 0;
+
+ if (hcon->type == ACL_LINK)
+ conn->hs_enabled = test_bit(HCI_HS_ENABLED,
+ &hcon->hdev->dev_flags);
+
+ spin_lock_init(&conn->lock);
+ mutex_init(&conn->chan_lock);
+
+ INIT_LIST_HEAD(&conn->chan_l);
+ INIT_LIST_HEAD(&conn->users);
+
+ if (hcon->type == LE_LINK)
+ INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
+ else
+ INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
+
+ skb_queue_head_init(&conn->pending_rx);
+ INIT_WORK(&conn->pending_rx_work, process_pending_rx);
+
+ conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
+
+ return conn;
+}
+
+static bool is_valid_psm(u16 psm, u8 dst_type)
+{
+ if (!psm)
+ return false;
+
+ /* LE-based PSMs fit in a single octet */
+ if (bdaddr_type_is_le(dst_type))
+ return (psm <= 0x00ff);
+
+ /* BR/EDR PSM must be odd and lsb of upper byte must be 0 */
+ return ((psm & 0x0101) == 0x0001);
+}
+
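+/* Initiate an outgoing connection on a channel: validate the PSM/CID
+ * and channel mode, create or reuse the HCI connection to the
+ * destination, attach the channel to the resulting l2cap_conn, and
+ * start the connect procedure (or finish immediately if the link is
+ * already up).
+ */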
+int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
+ bdaddr_t *dst, u8 dst_type)
+{
+ struct l2cap_conn *conn;
+ struct hci_conn *hcon;
+ struct hci_dev *hdev;
+ __u8 auth_type;
+ int err;
+
+ BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
+ dst_type, __le16_to_cpu(psm));
+
+ hdev = hci_get_route(dst, &chan->src);
+ if (!hdev)
+ return -EHOSTUNREACH;
+
+ hci_dev_lock(hdev);
+
+ l2cap_chan_lock(chan);
+
+ if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
+ chan->chan_type != L2CAP_CHAN_RAW) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ switch (chan->mode) {
+ case L2CAP_MODE_BASIC:
+ break;
+ case L2CAP_MODE_LE_FLOWCTL:
+ l2cap_le_flowctl_init(chan);
+ break;
+ case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
+ if (!disable_ertm)
+ break;
+ /* fall through */
+ default:
+ err = -ENOTSUPP;
+ goto done;
+ }
+
+ switch (chan->state) {
+ case BT_CONNECT:
+ case BT_CONNECT2:
+ case BT_CONFIG:
+ /* Already connecting */
+ err = 0;
+ goto done;
+
+ case BT_CONNECTED:
+ /* Already connected */
+ err = -EISCONN;
+ goto done;
+
+ case BT_OPEN:
+ case BT_BOUND:
+ /* Can connect */
+ break;
+
+ default:
+ err = -EBADFD;
+ goto done;
+ }
+
+ /* Set destination address and psm */
+ bacpy(&chan->dst, dst);
+ chan->dst_type = dst_type;
+
+ chan->psm = psm;
+ chan->dcid = cid;
+
+ auth_type = l2cap_get_auth_type(chan);
+
+ if (bdaddr_type_is_le(dst_type)) {
+ /* Convert from L2CAP channel address type to HCI address type
+ */
+ if (dst_type == BDADDR_LE_PUBLIC)
+ dst_type = ADDR_LE_DEV_PUBLIC;
+ else
+ dst_type = ADDR_LE_DEV_RANDOM;
+
+ hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
+ auth_type);
+ } else {
+ hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
+ }
+
+ if (IS_ERR(hcon)) {
+ err = PTR_ERR(hcon);
+ goto done;
+ }
+
+ conn = l2cap_conn_add(hcon);
+ if (!conn) {
+ hci_conn_drop(hcon);
+ err = -ENOMEM;
+ goto done;
+ }
+
+ if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
+ hci_conn_drop(hcon);
+ err = -EBUSY;
+ goto done;
+ }
+
+ /* Update source addr of the socket */
+ bacpy(&chan->src, &hcon->src);
+ chan->src_type = bdaddr_type(hcon, hcon->src_type);
+
+ l2cap_chan_unlock(chan);
+ l2cap_chan_add(conn, chan);
+ l2cap_chan_lock(chan);
+
+ /* l2cap_chan_add takes its own ref so we can drop this one */
+ hci_conn_drop(hcon);
+
+ l2cap_state_change(chan, BT_CONNECT);
+ __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
+
+ /* Release chan->sport so that it can be reused by other
+ * sockets (as it's only used for listening sockets).
+ */
+ write_lock(&chan_list_lock);
+ chan->sport = 0;
+ write_unlock(&chan_list_lock);
+
+ if (hcon->state == BT_CONNECTED) {
+ if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
+ __clear_chan_timer(chan);
+ if (l2cap_chan_check_security(chan))
+ l2cap_state_change(chan, BT_CONNECTED);
+ } else
+ l2cap_do_start(chan);
+ }
+
+ err = 0;
+
+done:
+ l2cap_chan_unlock(chan);
+ hci_dev_unlock(hdev);
+ hci_dev_put(hdev);
+ return err;
+}
+
+/* ---- L2CAP interface with lower layer (HCI) ---- */
+
+int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+ int exact = 0, lm1 = 0, lm2 = 0;
+ struct l2cap_chan *c;
+
+ BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
+
+ /* Find listening sockets and check their link_mode */
+ read_lock(&chan_list_lock);
+ list_for_each_entry(c, &chan_list, global_l) {
+ if (c->state != BT_LISTEN)
+ continue;
+
+ if (!bacmp(&c->src, &hdev->bdaddr)) {
+ lm1 |= HCI_LM_ACCEPT;
+ if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
+ lm1 |= HCI_LM_MASTER;
+ exact++;
+ } else if (!bacmp(&c->src, BDADDR_ANY)) {
+ lm2 |= HCI_LM_ACCEPT;
+ if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
+ lm2 |= HCI_LM_MASTER;
+ }
+ }
+ read_unlock(&chan_list_lock);
+
+ return exact ? lm1 : lm2;
+}
+
+void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
+{
+ struct l2cap_conn *conn;
+
+ BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
+
+ if (!status) {
+ conn = l2cap_conn_add(hcon);
+ if (conn)
+ l2cap_conn_ready(conn);
+ } else {
+ l2cap_conn_del(hcon, bt_to_errno(status));
+ }
+}
+
+int l2cap_disconn_ind(struct hci_conn *hcon)
+{
+ struct l2cap_conn *conn = hcon->l2cap_data;
+
+ BT_DBG("hcon %p", hcon);
+
+ if (!conn)
+ return HCI_ERROR_REMOTE_USER_TERM;
+ return conn->disc_reason;
+}
+
+void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
+{
+ BT_DBG("hcon %p reason %d", hcon, reason);
+
+ bt_6lowpan_del_conn(hcon->l2cap_data);
+
+ l2cap_conn_del(hcon, bt_to_errno(reason));
+}
+
+static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
+{
+ if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
+ return;
+
+ if (encrypt == 0x00) {
+ if (chan->sec_level == BT_SECURITY_MEDIUM) {
+ __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
+ } else if (chan->sec_level == BT_SECURITY_HIGH ||
+ chan->sec_level == BT_SECURITY_FIPS)
+ l2cap_chan_close(chan, ECONNREFUSED);
+ } else {
+ if (chan->sec_level == BT_SECURITY_MEDIUM)
+ __clear_chan_timer(chan);
+ }
+}
+
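+/* Called by HCI when the security state (authentication/encryption) of
+ * the link changes. Walks every channel on the connection and resumes,
+ * completes or aborts its setup depending on the result.
+ */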
+int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
+{
+ struct l2cap_conn *conn = hcon->l2cap_data;
+ struct l2cap_chan *chan;
+
+ if (!conn)
+ return 0;
+
+ BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
+
+ if (hcon->type == LE_LINK) {
+ if (!status && encrypt)
+ smp_distribute_keys(conn);
+ cancel_delayed_work(&conn->security_timer);
+ }
+
+ mutex_lock(&conn->chan_lock);
+
+ list_for_each_entry(chan, &conn->chan_l, list) {
+ l2cap_chan_lock(chan);
+
+ BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
+ state_to_string(chan->state));
+
+ if (chan->scid == L2CAP_CID_A2MP) {
+ l2cap_chan_unlock(chan);
+ continue;
+ }
+
+ if (chan->scid == L2CAP_CID_ATT) {
+ if (!status && encrypt) {
+ chan->sec_level = hcon->sec_level;
+ l2cap_chan_ready(chan);
+ }
+
+ l2cap_chan_unlock(chan);
+ continue;
+ }
+
+ if (!__l2cap_no_conn_pending(chan)) {
+ l2cap_chan_unlock(chan);
+ continue;
+ }
+
+ if (!status && (chan->state == BT_CONNECTED ||
+ chan->state == BT_CONFIG)) {
+ chan->ops->resume(chan);
+ l2cap_check_encryption(chan, encrypt);
+ l2cap_chan_unlock(chan);
+ continue;
+ }
+
+ if (chan->state == BT_CONNECT) {
+ if (!status)
+ l2cap_start_connection(chan);
+ else
+ __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
+ } else if (chan->state == BT_CONNECT2) {
+ struct l2cap_conn_rsp rsp;
+ __u16 res, stat;
+
+ if (!status) {
+ if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
+ res = L2CAP_CR_PEND;
+ stat = L2CAP_CS_AUTHOR_PEND;
+ chan->ops->defer(chan);
+ } else {
+ l2cap_state_change(chan, BT_CONFIG);
+ res = L2CAP_CR_SUCCESS;
+ stat = L2CAP_CS_NO_INFO;
+ }
+ } else {
+ l2cap_state_change(chan, BT_DISCONN);
+ __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
+ res = L2CAP_CR_SEC_BLOCK;
+ stat = L2CAP_CS_NO_INFO;
+ }
+
+ rsp.scid = cpu_to_le16(chan->dcid);
+ rsp.dcid = cpu_to_le16(chan->scid);
+ rsp.result = cpu_to_le16(res);
+ rsp.status = cpu_to_le16(stat);
+ l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
+ sizeof(rsp), &rsp);
+
+ if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
+ res == L2CAP_CR_SUCCESS) {
+ char buf[128];
+ set_bit(CONF_REQ_SENT, &chan->conf_state);
+ l2cap_send_cmd(conn, l2cap_get_ident(conn),
+ L2CAP_CONF_REQ,
+ l2cap_build_conf_req(chan, buf),
+ buf);
+ chan->num_conf_req++;
+ }
+ }
+
+ l2cap_chan_unlock(chan);
+ }
+
+ mutex_unlock(&conn->chan_lock);
+
+ return 0;
+}
+
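+/* Receive ACL data from HCI. An L2CAP frame may arrive complete in one
+ * ACL packet, or as an ACL_START fragment followed by ACL_CONT
+ * fragments; the total length comes from the basic L2CAP header in the
+ * start fragment, and conn->rx_skb/rx_len hold the partially
+ * reassembled frame until the last fragment arrives.
+ */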
+int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+{
+ struct l2cap_conn *conn = hcon->l2cap_data;
+ struct l2cap_hdr *hdr;
+ int len;
+
+ /* For an AMP controller, do not create an l2cap conn */
+ if (!conn && hcon->hdev->dev_type != HCI_BREDR)
+ goto drop;
+
+ if (!conn)
+ conn = l2cap_conn_add(hcon);
+
+ if (!conn)
+ goto drop;
+
+ BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
+
+ switch (flags) {
+ case ACL_START:
+ case ACL_START_NO_FLUSH:
+ case ACL_COMPLETE:
+ if (conn->rx_len) {
+ BT_ERR("Unexpected start frame (len %d)", skb->len);
+ kfree_skb(conn->rx_skb);
+ conn->rx_skb = NULL;
+ conn->rx_len = 0;
+ l2cap_conn_unreliable(conn, ECOMM);
+ }
+
+ /* A start fragment always begins with the Basic L2CAP header */
+ if (skb->len < L2CAP_HDR_SIZE) {
+ BT_ERR("Frame is too short (len %d)", skb->len);
+ l2cap_conn_unreliable(conn, ECOMM);
+ goto drop;
+ }
+
+ hdr = (struct l2cap_hdr *) skb->data;
+ len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
+
+ if (len == skb->len) {
+ /* Complete frame received */
+ l2cap_recv_frame(conn, skb);
+ return 0;
+ }
+
+ BT_DBG("Start: total len %d, frag len %d", len, skb->len);
+
+ if (skb->len > len) {
+ BT_ERR("Frame is too long (len %d, expected len %d)",
+ skb->len, len);
+ l2cap_conn_unreliable(conn, ECOMM);
+ goto drop;
+ }
+
+ /* Allocate skb for the complete frame (with header) */
+ conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
+ if (!conn->rx_skb)
+ goto drop;
+
+ skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
+ skb->len);
+ conn->rx_len = len - skb->len;
+ break;
+
+ case ACL_CONT:
+ BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
+
+ if (!conn->rx_len) {
+ BT_ERR("Unexpected continuation frame (len %d)", skb->len);
+ l2cap_conn_unreliable(conn, ECOMM);
+ goto drop;
+ }
+
+ if (skb->len > conn->rx_len) {
+ BT_ERR("Fragment is too long (len %d, expected %d)",
+ skb->len, conn->rx_len);
+ kfree_skb(conn->rx_skb);
+ conn->rx_skb = NULL;
+ conn->rx_len = 0;
+ l2cap_conn_unreliable(conn, ECOMM);
+ goto drop;
+ }
+
+ skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
+ skb->len);
+ conn->rx_len -= skb->len;
+
+ if (!conn->rx_len) {
+ /* Complete frame received. l2cap_recv_frame
+ * takes ownership of the skb, so clear the
+ * connection's rx_skb pointer first.
+ */
+ struct sk_buff *rx_skb = conn->rx_skb;
+ conn->rx_skb = NULL;
+ l2cap_recv_frame(conn, rx_skb);
+ }
+ break;
+ }
+
+drop:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int l2cap_debugfs_show(struct seq_file *f, void *p)
+{
+ struct l2cap_chan *c;
+
+ read_lock(&chan_list_lock);
+
+ list_for_each_entry(c, &chan_list, global_l) {
+ seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
+ &c->src, &c->dst,
+ c->state, __le16_to_cpu(c->psm),
+ c->scid, c->dcid, c->imtu, c->omtu,
+ c->sec_level, c->mode);
+ }
+
+ read_unlock(&chan_list_lock);
+
+ return 0;
+}
+
+static int l2cap_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, l2cap_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations l2cap_debugfs_fops = {
+ .open = l2cap_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct dentry *l2cap_debugfs;
+
+int __init l2cap_init(void)
+{
+ int err;
+
+ err = l2cap_init_sockets();
+ if (err < 0)
+ return err;
+
+ if (IS_ERR_OR_NULL(bt_debugfs))
+ return 0;
+
+ l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
+ NULL, &l2cap_debugfs_fops);
+
+ debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
+ &le_max_credits);
+ debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
+ &le_default_mps);
+
+ bt_6lowpan_init();
+
+ return 0;
+}
+
+void l2cap_exit(void)
+{
+ bt_6lowpan_cleanup();
+ debugfs_remove(l2cap_debugfs);
+ l2cap_cleanup_sockets();
+}
+
+module_param(disable_ertm, bool, 0644);
+MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
new file mode 100644
index 00000000000..e1378693cc9
--- /dev/null
+++ b/net/bluetooth/l2cap_sock.c
@@ -0,0 +1,1618 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (C) 2000-2001 Qualcomm Incorporated
+ Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
+ Copyright (C) 2010 Google Inc.
+ Copyright (C) 2011 ProFUSION Embedded Systems
+
+ Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+/* Bluetooth L2CAP sockets. */
+
+#include <linux/module.h>
+#include <linux/export.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/l2cap.h>
+
+#include "smp.h"
+
+static struct bt_sock_list l2cap_sk_list = {
+ .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
+};
+
+static const struct proto_ops l2cap_sock_ops;
+static void l2cap_sock_init(struct sock *sk, struct sock *parent);
+static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
+ int proto, gfp_t prio);
+
+bool l2cap_is_socket(struct socket *sock)
+{
+ return sock && sock->ops == &l2cap_sock_ops;
+}
+EXPORT_SYMBOL(l2cap_is_socket);
+
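+/* A valid BR/EDR PSM has an odd low octet and an even high octet,
+ * e.g. 0x0001 (SDP), 0x0003 (RFCOMM) or 0x1001 (first dynamic PSM).
+ */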
+static int l2cap_validate_bredr_psm(u16 psm)
+{
+ /* PSM must be odd and lsb of upper byte must be 0 */
+ if ((psm & 0x0101) != 0x0001)
+ return -EINVAL;
+
+ /* Restrict usage of well-known PSMs */
+ if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE))
+ return -EACCES;
+
+ return 0;
+}
+
+static int l2cap_validate_le_psm(u16 psm)
+{
+ /* Valid LE_PSM ranges are defined only until 0x00ff */
+ if (psm > 0x00ff)
+ return -EINVAL;
+
+ /* Restrict fixed, SIG assigned PSM values to CAP_NET_BIND_SERVICE */
+ if (psm <= 0x007f && !capable(CAP_NET_BIND_SERVICE))
+ return -EACCES;
+
+ return 0;
+}
+
+static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
+{
+ struct sock *sk = sock->sk;
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+ struct sockaddr_l2 la;
+ int len, err = 0;
+
+ BT_DBG("sk %p", sk);
+
+ if (!addr || addr->sa_family != AF_BLUETOOTH)
+ return -EINVAL;
+
+ memset(&la, 0, sizeof(la));
+ len = min_t(unsigned int, sizeof(la), alen);
+ memcpy(&la, addr, len);
+
+ if (la.l2_cid && la.l2_psm)
+ return -EINVAL;
+
+ if (!bdaddr_type_is_valid(la.l2_bdaddr_type))
+ return -EINVAL;
+
+ if (la.l2_cid) {
+ /* When the socket gets created it defaults to
+ * CHAN_CONN_ORIENTED, so we need to overwrite the
+ * default here.
+ */
+ chan->chan_type = L2CAP_CHAN_FIXED;
+ chan->omtu = L2CAP_DEFAULT_MTU;
+ }
+
+ if (bdaddr_type_is_le(la.l2_bdaddr_type)) {
+ /* We only allow ATT user space socket */
+ if (la.l2_cid &&
+ la.l2_cid != cpu_to_le16(L2CAP_CID_ATT))
+ return -EINVAL;
+ }
+
+ lock_sock(sk);
+
+ if (sk->sk_state != BT_OPEN) {
+ err = -EBADFD;
+ goto done;
+ }
+
+ if (la.l2_psm) {
+ __u16 psm = __le16_to_cpu(la.l2_psm);
+
+ if (la.l2_bdaddr_type == BDADDR_BREDR)
+ err = l2cap_validate_bredr_psm(psm);
+ else
+ err = l2cap_validate_le_psm(psm);
+
+ if (err)
+ goto done;
+ }
+
+ if (la.l2_cid)
+ err = l2cap_add_scid(chan, __le16_to_cpu(la.l2_cid));
+ else
+ err = l2cap_add_psm(chan, &la.l2_bdaddr, la.l2_psm);
+
+ if (err < 0)
+ goto done;
+
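+ /* Sockets bound to the well-known SDP, RFCOMM or 3DSP PSMs, as
+ * well as raw sockets, default to BT_SECURITY_SDP, the lowest
+ * security level.
+ */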
+ switch (chan->chan_type) {
+ case L2CAP_CHAN_CONN_LESS:
+ if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_3DSP)
+ chan->sec_level = BT_SECURITY_SDP;
+ break;
+ case L2CAP_CHAN_CONN_ORIENTED:
+ if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_SDP ||
+ __le16_to_cpu(la.l2_psm) == L2CAP_PSM_RFCOMM)
+ chan->sec_level = BT_SECURITY_SDP;
+ break;
+ case L2CAP_CHAN_RAW:
+ chan->sec_level = BT_SECURITY_SDP;
+ break;
+ }
+
+ bacpy(&chan->src, &la.l2_bdaddr);
+ chan->src_type = la.l2_bdaddr_type;
+
+ if (chan->psm && bdaddr_type_is_le(chan->src_type))
+ chan->mode = L2CAP_MODE_LE_FLOWCTL;
+
+ chan->state = BT_BOUND;
+ sk->sk_state = BT_BOUND;
+
+done:
+ release_sock(sk);
+ return err;
+}
+
+static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
+ int alen, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+ struct sockaddr_l2 la;
+ int len, err = 0;
+
+ BT_DBG("sk %p", sk);
+
+ if (!addr || alen < sizeof(addr->sa_family) ||
+ addr->sa_family != AF_BLUETOOTH)
+ return -EINVAL;
+
+ memset(&la, 0, sizeof(la));
+ len = min_t(unsigned int, sizeof(la), alen);
+ memcpy(&la, addr, len);
+
+ if (la.l2_cid && la.l2_psm)
+ return -EINVAL;
+
+ if (!bdaddr_type_is_valid(la.l2_bdaddr_type))
+ return -EINVAL;
+
+ /* Check that the socket wasn't bound to something that
+ * conflicts with the address given to connect(). If chan->src
+ * is BDADDR_ANY it means bind() was never used, in which case
+ * chan->src_type and la.l2_bdaddr_type do not need to match.
+ */
+ if (chan->src_type == BDADDR_BREDR && bacmp(&chan->src, BDADDR_ANY) &&
+ bdaddr_type_is_le(la.l2_bdaddr_type)) {
+ /* Old user space versions will try to incorrectly bind
+ * the ATT socket using BDADDR_BREDR. We need to accept
+ * this and fix up the source address type only when
+ * both the source CID and destination CID indicate
+ * ATT. Anything else is an invalid combination.
+ */
+ if (chan->scid != L2CAP_CID_ATT ||
+ la.l2_cid != cpu_to_le16(L2CAP_CID_ATT))
+ return -EINVAL;
+
+ /* We don't have the hdev available here to make a
+ * better decision on random vs public, but since all
+ * user space versions that exhibit this issue do not
+ * support random local addresses anyway, assuming
+ * public here is good enough.
+ */
+ chan->src_type = BDADDR_LE_PUBLIC;
+ }
+
+ if (chan->src_type != BDADDR_BREDR && la.l2_bdaddr_type == BDADDR_BREDR)
+ return -EINVAL;
+
+ if (bdaddr_type_is_le(la.l2_bdaddr_type)) {
+ /* We only allow ATT user space socket */
+ if (la.l2_cid &&
+ la.l2_cid != cpu_to_le16(L2CAP_CID_ATT))
+ return -EINVAL;
+ }
+
+ if (chan->psm && bdaddr_type_is_le(chan->src_type))
+ chan->mode = L2CAP_MODE_LE_FLOWCTL;
+
+ err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
+ &la.l2_bdaddr, la.l2_bdaddr_type);
+ if (err)
+ return err;
+
+ lock_sock(sk);
+
+ err = bt_sock_wait_state(sk, BT_CONNECTED,
+ sock_sndtimeo(sk, flags & O_NONBLOCK));
+
+ release_sock(sk);
+
+ return err;
+}
+
+static int l2cap_sock_listen(struct socket *sock, int backlog)
+{
+ struct sock *sk = sock->sk;
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+ int err = 0;
+
+ BT_DBG("sk %p backlog %d", sk, backlog);
+
+ lock_sock(sk);
+
+ if (sk->sk_state != BT_BOUND) {
+ err = -EBADFD;
+ goto done;
+ }
+
+ if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ switch (chan->mode) {
+ case L2CAP_MODE_BASIC:
+ case L2CAP_MODE_LE_FLOWCTL:
+ break;
+ case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
+ if (!disable_ertm)
+ break;
+ /* fall through */
+ default:
+ err = -ENOTSUPP;
+ goto done;
+ }
+
+ sk->sk_max_ack_backlog = backlog;
+ sk->sk_ack_backlog = 0;
+
+ chan->state = BT_LISTEN;
+ sk->sk_state = BT_LISTEN;
+
+done:
+ release_sock(sk);
+ return err;
+}
+
+static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
+ int flags)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ struct sock *sk = sock->sk, *nsk;
+ long timeo;
+ int err = 0;
+
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+
+ timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+
+ BT_DBG("sk %p timeo %ld", sk, timeo);
+
+ /* Wait for an incoming connection. (wake-one). */
+ add_wait_queue_exclusive(sk_sleep(sk), &wait);
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (sk->sk_state != BT_LISTEN) {
+ err = -EBADFD;
+ break;
+ }
+
+ nsk = bt_accept_dequeue(sk, newsock);
+ if (nsk)
+ break;
+
+ if (!timeo) {
+ err = -EAGAIN;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ err = sock_intr_errno(timeo);
+ break;
+ }
+
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+ }
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(sk_sleep(sk), &wait);
+
+ if (err)
+ goto done;
+
+ newsock->state = SS_CONNECTED;
+
+ BT_DBG("new socket %p", nsk);
+
+done:
+ release_sock(sk);
+ return err;
+}
+
+static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr,
+ int *len, int peer)
+{
+ struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
+ struct sock *sk = sock->sk;
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+
+ BT_DBG("sock %p, sk %p", sock, sk);
+
+ if (peer && sk->sk_state != BT_CONNECTED &&
+ sk->sk_state != BT_CONNECT && sk->sk_state != BT_CONNECT2)
+ return -ENOTCONN;
+
+ memset(la, 0, sizeof(struct sockaddr_l2));
+ addr->sa_family = AF_BLUETOOTH;
+ *len = sizeof(struct sockaddr_l2);
+
+ la->l2_psm = chan->psm;
+
+ if (peer) {
+ bacpy(&la->l2_bdaddr, &chan->dst);
+ la->l2_cid = cpu_to_le16(chan->dcid);
+ la->l2_bdaddr_type = chan->dst_type;
+ } else {
+ bacpy(&la->l2_bdaddr, &chan->src);
+ la->l2_cid = cpu_to_le16(chan->scid);
+ la->l2_bdaddr_type = chan->src_type;
+ }
+
+ return 0;
+}
+
+static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
+ char __user *optval, int __user *optlen)
+{
+ struct sock *sk = sock->sk;
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+ struct l2cap_options opts;
+ struct l2cap_conninfo cinfo;
+ int len, err = 0;
+ u32 opt;
+
+ BT_DBG("sk %p", sk);
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+
+ lock_sock(sk);
+
+ switch (optname) {
+ case L2CAP_OPTIONS:
+ /* LE sockets should use BT_SNDMTU/BT_RCVMTU, but since
+ * legacy ATT code depends on getsockopt for
+ * L2CAP_OPTIONS we need to let this pass.
+ */
+ if (bdaddr_type_is_le(chan->src_type) &&
+ chan->scid != L2CAP_CID_ATT) {
+ err = -EINVAL;
+ break;
+ }
+
+ memset(&opts, 0, sizeof(opts));
+ opts.imtu = chan->imtu;
+ opts.omtu = chan->omtu;
+ opts.flush_to = chan->flush_to;
+ opts.mode = chan->mode;
+ opts.fcs = chan->fcs;
+ opts.max_tx = chan->max_tx;
+ opts.txwin_size = chan->tx_win;
+
+ len = min_t(unsigned int, len, sizeof(opts));
+ if (copy_to_user(optval, (char *) &opts, len))
+ err = -EFAULT;
+
+ break;
+
+ case L2CAP_LM:
+ switch (chan->sec_level) {
+ case BT_SECURITY_LOW:
+ opt = L2CAP_LM_AUTH;
+ break;
+ case BT_SECURITY_MEDIUM:
+ opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
+ break;
+ case BT_SECURITY_HIGH:
+ opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
+ L2CAP_LM_SECURE;
+ break;
+ case BT_SECURITY_FIPS:
+ opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
+ L2CAP_LM_SECURE | L2CAP_LM_FIPS;
+ break;
+ default:
+ opt = 0;
+ break;
+ }
+
+ if (test_bit(FLAG_ROLE_SWITCH, &chan->flags))
+ opt |= L2CAP_LM_MASTER;
+
+ if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
+ opt |= L2CAP_LM_RELIABLE;
+
+ if (put_user(opt, (u32 __user *) optval))
+ err = -EFAULT;
+
+ break;
+
+ case L2CAP_CONNINFO:
+ if (sk->sk_state != BT_CONNECTED &&
+ !(sk->sk_state == BT_CONNECT2 &&
+ test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) {
+ err = -ENOTCONN;
+ break;
+ }
+
+ memset(&cinfo, 0, sizeof(cinfo));
+ cinfo.hci_handle = chan->conn->hcon->handle;
+ memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3);
+
+ len = min_t(unsigned int, len, sizeof(cinfo));
+ if (copy_to_user(optval, (char *) &cinfo, len))
+ err = -EFAULT;
+
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ release_sock(sk);
+ return err;
+}
+
+static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ struct sock *sk = sock->sk;
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+ struct bt_security sec;
+ struct bt_power pwr;
+ int len, err = 0;
+
+ BT_DBG("sk %p", sk);
+
+ if (level == SOL_L2CAP)
+ return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
+
+ if (level != SOL_BLUETOOTH)
+ return -ENOPROTOOPT;
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+
+ lock_sock(sk);
+
+ switch (optname) {
+ case BT_SECURITY:
+ if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
+ chan->chan_type != L2CAP_CHAN_FIXED &&
+ chan->chan_type != L2CAP_CHAN_RAW) {
+ err = -EINVAL;
+ break;
+ }
+
+ memset(&sec, 0, sizeof(sec));
+ if (chan->conn) {
+ sec.level = chan->conn->hcon->sec_level;
+
+ if (sk->sk_state == BT_CONNECTED)
+ sec.key_size = chan->conn->hcon->enc_key_size;
+ } else {
+ sec.level = chan->sec_level;
+ }
+
+ len = min_t(unsigned int, len, sizeof(sec));
+ if (copy_to_user(optval, (char *) &sec, len))
+ err = -EFAULT;
+
+ break;
+
+ case BT_DEFER_SETUP:
+ if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
+ err = -EINVAL;
+ break;
+ }
+
+ if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
+ (u32 __user *) optval))
+ err = -EFAULT;
+
+ break;
+
+ case BT_FLUSHABLE:
+ if (put_user(test_bit(FLAG_FLUSHABLE, &chan->flags),
+ (u32 __user *) optval))
+ err = -EFAULT;
+
+ break;
+
+ case BT_POWER:
+ if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
+ && sk->sk_type != SOCK_RAW) {
+ err = -EINVAL;
+ break;
+ }
+
+ pwr.force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+
+ len = min_t(unsigned int, len, sizeof(pwr));
+ if (copy_to_user(optval, (char *) &pwr, len))
+ err = -EFAULT;
+
+ break;
+
+ case BT_CHANNEL_POLICY:
+ if (put_user(chan->chan_policy, (u32 __user *) optval))
+ err = -EFAULT;
+ break;
+
+ case BT_SNDMTU:
+ if (!bdaddr_type_is_le(chan->src_type)) {
+ err = -EINVAL;
+ break;
+ }
+
+ if (sk->sk_state != BT_CONNECTED) {
+ err = -ENOTCONN;
+ break;
+ }
+
+ if (put_user(chan->omtu, (u16 __user *) optval))
+ err = -EFAULT;
+ break;
+
+ case BT_RCVMTU:
+ if (!bdaddr_type_is_le(chan->src_type)) {
+ err = -EINVAL;
+ break;
+ }
+
+ if (put_user(chan->imtu, (u16 __user *) optval))
+ err = -EFAULT;
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ release_sock(sk);
+ return err;
+}
+
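+/* ATT (LE) channels must honour the LE minimum MTU of 23 bytes;
+ * all other channels use the BR/EDR minimum of 48 bytes.
+ */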
+static bool l2cap_valid_mtu(struct l2cap_chan *chan, u16 mtu)
+{
+ switch (chan->scid) {
+ case L2CAP_CID_ATT:
+ if (mtu < L2CAP_LE_MIN_MTU)
+ return false;
+ break;
+
+ default:
+ if (mtu < L2CAP_DEFAULT_MIN_MTU)
+ return false;
+ }
+
+ return true;
+}
+
+static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
+ char __user *optval, unsigned int optlen)
+{
+ struct sock *sk = sock->sk;
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+ struct l2cap_options opts;
+ int len, err = 0;
+ u32 opt;
+
+ BT_DBG("sk %p", sk);
+
+ lock_sock(sk);
+
+ switch (optname) {
+ case L2CAP_OPTIONS:
+ if (bdaddr_type_is_le(chan->src_type)) {
+ err = -EINVAL;
+ break;
+ }
+
+ if (sk->sk_state == BT_CONNECTED) {
+ err = -EINVAL;
+ break;
+ }
+
+ opts.imtu = chan->imtu;
+ opts.omtu = chan->omtu;
+ opts.flush_to = chan->flush_to;
+ opts.mode = chan->mode;
+ opts.fcs = chan->fcs;
+ opts.max_tx = chan->max_tx;
+ opts.txwin_size = chan->tx_win;
+
+ len = min_t(unsigned int, sizeof(opts), optlen);
+ if (copy_from_user((char *) &opts, optval, len)) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (opts.txwin_size > L2CAP_DEFAULT_EXT_WINDOW) {
+ err = -EINVAL;
+ break;
+ }
+
+ if (!l2cap_valid_mtu(chan, opts.imtu)) {
+ err = -EINVAL;
+ break;
+ }
+
+ chan->mode = opts.mode;
+ switch (chan->mode) {
+ case L2CAP_MODE_LE_FLOWCTL:
+ break;
+ case L2CAP_MODE_BASIC:
+ clear_bit(CONF_STATE2_DEVICE, &chan->conf_state);
+ break;
+ case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
+ if (!disable_ertm)
+ break;
+ /* fall through */
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ chan->imtu = opts.imtu;
+ chan->omtu = opts.omtu;
+ chan->fcs = opts.fcs;
+ chan->max_tx = opts.max_tx;
+ chan->tx_win = opts.txwin_size;
+ chan->flush_to = opts.flush_to;
+ break;
+
+ case L2CAP_LM:
+ if (get_user(opt, (u32 __user *) optval)) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (opt & L2CAP_LM_FIPS) {
+ err = -EINVAL;
+ break;
+ }
+
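+ /* The link mode flags map cumulatively onto a security level;
+ * the strongest flag present wins.
+ */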
+ if (opt & L2CAP_LM_AUTH)
+ chan->sec_level = BT_SECURITY_LOW;
+ if (opt & L2CAP_LM_ENCRYPT)
+ chan->sec_level = BT_SECURITY_MEDIUM;
+ if (opt & L2CAP_LM_SECURE)
+ chan->sec_level = BT_SECURITY_HIGH;
+
+ if (opt & L2CAP_LM_MASTER)
+ set_bit(FLAG_ROLE_SWITCH, &chan->flags);
+ else
+ clear_bit(FLAG_ROLE_SWITCH, &chan->flags);
+
+ if (opt & L2CAP_LM_RELIABLE)
+ set_bit(FLAG_FORCE_RELIABLE, &chan->flags);
+ else
+ clear_bit(FLAG_FORCE_RELIABLE, &chan->flags);
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ release_sock(sk);
+ return err;
+}
+
+static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, unsigned int optlen)
+{
+ struct sock *sk = sock->sk;
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+ struct bt_security sec;
+ struct bt_power pwr;
+ struct l2cap_conn *conn;
+ int len, err = 0;
+ u32 opt;
+
+ BT_DBG("sk %p", sk);
+
+ if (level == SOL_L2CAP)
+ return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
+
+ if (level != SOL_BLUETOOTH)
+ return -ENOPROTOOPT;
+
+ lock_sock(sk);
+
+ switch (optname) {
+ case BT_SECURITY:
+ if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
+ chan->chan_type != L2CAP_CHAN_FIXED &&
+ chan->chan_type != L2CAP_CHAN_RAW) {
+ err = -EINVAL;
+ break;
+ }
+
+ sec.level = BT_SECURITY_LOW;
+
+ len = min_t(unsigned int, sizeof(sec), optlen);
+ if (copy_from_user((char *) &sec, optval, len)) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (sec.level < BT_SECURITY_LOW ||
+ sec.level > BT_SECURITY_HIGH) {
+ err = -EINVAL;
+ break;
+ }
+
+ chan->sec_level = sec.level;
+
+ if (!chan->conn)
+ break;
+
+ conn = chan->conn;
+
+ /* Change security for LE channels */
+ if (chan->scid == L2CAP_CID_ATT) {
+ if (smp_conn_security(conn->hcon, sec.level))
+ break;
+ sk->sk_state = BT_CONFIG;
+ chan->state = BT_CONFIG;
+
+ /* or for ACL link */
+ } else if ((sk->sk_state == BT_CONNECT2 &&
+ test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) ||
+ sk->sk_state == BT_CONNECTED) {
+ if (!l2cap_chan_check_security(chan))
+ set_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
+ else
+ sk->sk_state_change(sk);
+ } else {
+ err = -EINVAL;
+ }
+ break;
+
+ case BT_DEFER_SETUP:
+ if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
+ err = -EINVAL;
+ break;
+ }
+
+ if (get_user(opt, (u32 __user *) optval)) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (opt) {
+ set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+ set_bit(FLAG_DEFER_SETUP, &chan->flags);
+ } else {
+ clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+ clear_bit(FLAG_DEFER_SETUP, &chan->flags);
+ }
+ break;
+
+ case BT_FLUSHABLE:
+ if (get_user(opt, (u32 __user *) optval)) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (opt > BT_FLUSHABLE_ON) {
+ err = -EINVAL;
+ break;
+ }
+
+ if (opt == BT_FLUSHABLE_OFF) {
+ conn = chan->conn;
+ /* Proceed further only when we have an l2cap_conn and
+ * No Flush support in the LM.
+ */
+ if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) {
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ if (opt)
+ set_bit(FLAG_FLUSHABLE, &chan->flags);
+ else
+ clear_bit(FLAG_FLUSHABLE, &chan->flags);
+ break;
+
+ case BT_POWER:
+ if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
+ chan->chan_type != L2CAP_CHAN_RAW) {
+ err = -EINVAL;
+ break;
+ }
+
+ pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
+
+ len = min_t(unsigned int, sizeof(pwr), optlen);
+ if (copy_from_user((char *) &pwr, optval, len)) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (pwr.force_active)
+ set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+ else
+ clear_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+ break;
+
+ case BT_CHANNEL_POLICY:
+ if (get_user(opt, (u32 __user *) optval)) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (opt > BT_CHANNEL_POLICY_AMP_PREFERRED) {
+ err = -EINVAL;
+ break;
+ }
+
+ if (chan->mode != L2CAP_MODE_ERTM &&
+ chan->mode != L2CAP_MODE_STREAMING) {
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ chan->chan_policy = (u8) opt;
+
+ if (sk->sk_state == BT_CONNECTED &&
+ chan->move_role == L2CAP_MOVE_ROLE_NONE)
+ l2cap_move_start(chan);
+
+ break;
+
+ case BT_SNDMTU:
+ if (!bdaddr_type_is_le(chan->src_type)) {
+ err = -EINVAL;
+ break;
+ }
+
+ /* Setting is not supported as it's the remote side that
+ * decides this.
+ */
+ err = -EPERM;
+ break;
+
+ case BT_RCVMTU:
+ if (!bdaddr_type_is_le(chan->src_type)) {
+ err = -EINVAL;
+ break;
+ }
+
+ if (sk->sk_state == BT_CONNECTED) {
+ err = -EISCONN;
+ break;
+ }
+
+ if (get_user(opt, (u32 __user *) optval)) {
+ err = -EFAULT;
+ break;
+ }
+
+ chan->imtu = opt;
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ release_sock(sk);
+ return err;
+}
+
+static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t len)
+{
+ struct sock *sk = sock->sk;
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+ int err;
+
+ BT_DBG("sock %p, sk %p", sock, sk);
+
+ err = sock_error(sk);
+ if (err)
+ return err;
+
+ if (msg->msg_flags & MSG_OOB)
+ return -EOPNOTSUPP;
+
+ if (sk->sk_state != BT_CONNECTED)
+ return -ENOTCONN;
+
+ lock_sock(sk);
+ err = bt_sock_wait_ready(sk, msg->msg_flags);
+ release_sock(sk);
+ if (err)
+ return err;
+
+ l2cap_chan_lock(chan);
+ err = l2cap_chan_send(chan, msg, len, sk->sk_priority);
+ l2cap_chan_unlock(chan);
+
+ return err;
+}
+
+static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t len, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ int err;
+
+ lock_sock(sk);
+
+ if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP,
+ &bt_sk(sk)->flags)) {
+ if (bdaddr_type_is_le(pi->chan->src_type)) {
+ sk->sk_state = BT_CONNECTED;
+ pi->chan->state = BT_CONNECTED;
+ __l2cap_le_connect_rsp_defer(pi->chan);
+ } else {
+ sk->sk_state = BT_CONFIG;
+ pi->chan->state = BT_CONFIG;
+ __l2cap_connect_rsp_defer(pi->chan);
+ }
+
+ err = 0;
+ goto done;
+ }
+
+ release_sock(sk);
+
+ if (sock->type == SOCK_STREAM)
+ err = bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
+ else
+ err = bt_sock_recvmsg(iocb, sock, msg, len, flags);
+
+ if (pi->chan->mode != L2CAP_MODE_ERTM)
+ return err;
+
+ /* Attempt to put pending rx data in the socket buffer */
+
+ lock_sock(sk);
+
+ if (!test_bit(CONN_LOCAL_BUSY, &pi->chan->conn_state))
+ goto done;
+
+ if (pi->rx_busy_skb) {
+ if (!sock_queue_rcv_skb(sk, pi->rx_busy_skb))
+ pi->rx_busy_skb = NULL;
+ else
+ goto done;
+ }
+
+ /* Restore data flow when half of the receive buffer is
+ * available. This avoids resending large numbers of
+ * frames.
+ */
+ if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf >> 1)
+ l2cap_chan_busy(pi->chan, 0);
+
+done:
+ release_sock(sk);
+ return err;
+}
+
+/* Kill socket (only if zapped and orphan)
+ * Must be called on unlocked socket.
+ */
+static void l2cap_sock_kill(struct sock *sk)
+{
+ if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
+ return;
+
+ BT_DBG("sk %p state %s", sk, state_to_string(sk->sk_state));
+
+ /* Kill poor orphan */
+
+ l2cap_chan_put(l2cap_pi(sk)->chan);
+ sock_set_flag(sk, SOCK_DEAD);
+ sock_put(sk);
+}
+
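+/* Poll in HZ/5 slices until every ERTM frame queued on the channel
+ * has been acked by the remote side, or until a signal or socket
+ * error interrupts the wait.
+ */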
+static int __l2cap_wait_ack(struct sock *sk)
+{
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+ DECLARE_WAITQUEUE(wait, current);
+ int err = 0;
+ int timeo = HZ/5;
+
+ add_wait_queue(sk_sleep(sk), &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (chan->unacked_frames > 0 && chan->conn) {
+ if (!timeo)
+ timeo = HZ/5;
+
+ if (signal_pending(current)) {
+ err = sock_intr_errno(timeo);
+ break;
+ }
+
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock(sk);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ err = sock_error(sk);
+ if (err)
+ break;
+ }
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(sk_sleep(sk), &wait);
+ return err;
+}
+
+static int l2cap_sock_shutdown(struct socket *sock, int how)
+{
+ struct sock *sk = sock->sk;
+ struct l2cap_chan *chan;
+ struct l2cap_conn *conn;
+ int err = 0;
+
+ BT_DBG("sock %p, sk %p", sock, sk);
+
+ if (!sk)
+ return 0;
+
+ chan = l2cap_pi(sk)->chan;
+ conn = chan->conn;
+
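+ /* Lock ordering: conn->chan_lock first, then the channel lock,
+ * then the socket lock. The socket lock is dropped around
+ * l2cap_chan_close() since channel callbacks may take it.
+ */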
+ if (conn)
+ mutex_lock(&conn->chan_lock);
+
+ l2cap_chan_lock(chan);
+ lock_sock(sk);
+
+ if (!sk->sk_shutdown) {
+ if (chan->mode == L2CAP_MODE_ERTM)
+ err = __l2cap_wait_ack(sk);
+
+ sk->sk_shutdown = SHUTDOWN_MASK;
+
+ release_sock(sk);
+ l2cap_chan_close(chan, 0);
+ lock_sock(sk);
+
+ if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
+ err = bt_sock_wait_state(sk, BT_CLOSED,
+ sk->sk_lingertime);
+ }
+
+ if (!err && sk->sk_err)
+ err = -sk->sk_err;
+
+ release_sock(sk);
+ l2cap_chan_unlock(chan);
+
+ if (conn)
+ mutex_unlock(&conn->chan_lock);
+
+ return err;
+}
+
+static int l2cap_sock_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+ int err;
+
+ BT_DBG("sock %p, sk %p", sock, sk);
+
+ if (!sk)
+ return 0;
+
+ bt_sock_unlink(&l2cap_sk_list, sk);
+
+ err = l2cap_sock_shutdown(sock, 2);
+
+ sock_orphan(sk);
+ l2cap_sock_kill(sk);
+ return err;
+}
+
+static void l2cap_sock_cleanup_listen(struct sock *parent)
+{
+ struct sock *sk;
+
+ BT_DBG("parent %p", parent);
+
+ /* Close channels that have not yet been accepted */
+ while ((sk = bt_accept_dequeue(parent, NULL))) {
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+
+ l2cap_chan_lock(chan);
+ __clear_chan_timer(chan);
+ l2cap_chan_close(chan, ECONNRESET);
+ l2cap_chan_unlock(chan);
+
+ l2cap_sock_kill(sk);
+ }
+}
+
+static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
+{
+ struct sock *sk, *parent = chan->data;
+
+ lock_sock(parent);
+
+ /* Reject the connection if the accept backlog is full */
+ if (sk_acceptq_is_full(parent)) {
+ BT_DBG("backlog full %d", parent->sk_ack_backlog);
+ release_sock(parent);
+ return NULL;
+ }
+
+ sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
+ GFP_ATOMIC);
+ if (!sk) {
+ release_sock(parent);
+ return NULL;
+ }
+
+ bt_sock_reclassify_lock(sk, BTPROTO_L2CAP);
+
+ l2cap_sock_init(sk, parent);
+
+ bt_accept_enqueue(parent, sk);
+
+ release_sock(parent);
+
+ return l2cap_pi(sk)->chan;
+}
+
+static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
+{
+ struct sock *sk = chan->data;
+ int err;
+
+ lock_sock(sk);
+
+ if (l2cap_pi(sk)->rx_busy_skb) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ err = sock_queue_rcv_skb(sk, skb);
+
+ /* For ERTM, handle one skb that doesn't fit into the recv
+ * buffer. This is important to do because the data frames
+ * have already been acked, so the skb cannot be discarded.
+ *
+ * Notify the l2cap core that the buffer is full, so the
+ * LOCAL_BUSY state is entered and no more frames are
+ * acked and reassembled until there is buffer space
+ * available.
+ */
+ if (err < 0 && chan->mode == L2CAP_MODE_ERTM) {
+ l2cap_pi(sk)->rx_busy_skb = skb;
+ l2cap_chan_busy(chan, 1);
+ err = 0;
+ }
+
+done:
+ release_sock(sk);
+
+ return err;
+}
+
+static void l2cap_sock_close_cb(struct l2cap_chan *chan)
+{
+ struct sock *sk = chan->data;
+
+ l2cap_sock_kill(sk);
+}
+
+static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
+{
+ struct sock *sk = chan->data;
+ struct sock *parent;
+
+ lock_sock(sk);
+
+ parent = bt_sk(sk)->parent;
+
+ sock_set_flag(sk, SOCK_ZAPPED);
+
+ switch (chan->state) {
+ case BT_OPEN:
+ case BT_BOUND:
+ case BT_CLOSED:
+ break;
+ case BT_LISTEN:
+ l2cap_sock_cleanup_listen(sk);
+ sk->sk_state = BT_CLOSED;
+ chan->state = BT_CLOSED;
+
+ break;
+ default:
+ sk->sk_state = BT_CLOSED;
+ chan->state = BT_CLOSED;
+
+ sk->sk_err = err;
+
+ if (parent) {
+ bt_accept_unlink(sk);
+ parent->sk_data_ready(parent);
+ } else {
+ sk->sk_state_change(sk);
+ }
+
+ break;
+ }
+
+ release_sock(sk);
+}
+
+static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state,
+ int err)
+{
+ struct sock *sk = chan->data;
+
+ sk->sk_state = state;
+
+ if (err)
+ sk->sk_err = err;
+}
+
+static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
+ unsigned long len, int nb)
+{
+ struct sock *sk = chan->data;
+ struct sk_buff *skb;
+ int err;
+
+ l2cap_chan_unlock(chan);
+ skb = bt_skb_send_alloc(sk, len, nb, &err);
+ l2cap_chan_lock(chan);
+
+ if (!skb)
+ return ERR_PTR(err);
+
+ bt_cb(skb)->chan = chan;
+
+ return skb;
+}
+
+static void l2cap_sock_ready_cb(struct l2cap_chan *chan)
+{
+ struct sock *sk = chan->data;
+ struct sock *parent;
+
+ lock_sock(sk);
+
+ parent = bt_sk(sk)->parent;
+
+ BT_DBG("sk %p, parent %p", sk, parent);
+
+ sk->sk_state = BT_CONNECTED;
+ sk->sk_state_change(sk);
+
+ if (parent)
+ parent->sk_data_ready(parent);
+
+ release_sock(sk);
+}
+
+static void l2cap_sock_defer_cb(struct l2cap_chan *chan)
+{
+ struct sock *parent, *sk = chan->data;
+
+ lock_sock(sk);
+
+ parent = bt_sk(sk)->parent;
+ if (parent)
+ parent->sk_data_ready(parent);
+
+ release_sock(sk);
+}
+
+static void l2cap_sock_resume_cb(struct l2cap_chan *chan)
+{
+ struct sock *sk = chan->data;
+
+ clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
+ sk->sk_state_change(sk);
+}
+
+static void l2cap_sock_set_shutdown_cb(struct l2cap_chan *chan)
+{
+ struct sock *sk = chan->data;
+
+ lock_sock(sk);
+ sk->sk_shutdown = SHUTDOWN_MASK;
+ release_sock(sk);
+}
+
+static long l2cap_sock_get_sndtimeo_cb(struct l2cap_chan *chan)
+{
+ struct sock *sk = chan->data;
+
+ return sk->sk_sndtimeo;
+}
+
+static void l2cap_sock_suspend_cb(struct l2cap_chan *chan)
+{
+ struct sock *sk = chan->data;
+
+ set_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
+ sk->sk_state_change(sk);
+}
+
+static struct l2cap_ops l2cap_chan_ops = {
+ .name = "L2CAP Socket Interface",
+ .new_connection = l2cap_sock_new_connection_cb,
+ .recv = l2cap_sock_recv_cb,
+ .close = l2cap_sock_close_cb,
+ .teardown = l2cap_sock_teardown_cb,
+ .state_change = l2cap_sock_state_change_cb,
+ .ready = l2cap_sock_ready_cb,
+ .defer = l2cap_sock_defer_cb,
+ .resume = l2cap_sock_resume_cb,
+ .suspend = l2cap_sock_suspend_cb,
+ .set_shutdown = l2cap_sock_set_shutdown_cb,
+ .get_sndtimeo = l2cap_sock_get_sndtimeo_cb,
+ .alloc_skb = l2cap_sock_alloc_skb_cb,
+};
+
+static void l2cap_sock_destruct(struct sock *sk)
+{
+ BT_DBG("sk %p", sk);
+
+ if (l2cap_pi(sk)->chan)
+ l2cap_chan_put(l2cap_pi(sk)->chan);
+
+ if (l2cap_pi(sk)->rx_busy_skb) {
+ kfree_skb(l2cap_pi(sk)->rx_busy_skb);
+ l2cap_pi(sk)->rx_busy_skb = NULL;
+ }
+
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
+}
+
+static void l2cap_skb_msg_name(struct sk_buff *skb, void *msg_name,
+ int *msg_namelen)
+{
+ DECLARE_SOCKADDR(struct sockaddr_l2 *, la, msg_name);
+
+ memset(la, 0, sizeof(struct sockaddr_l2));
+ la->l2_family = AF_BLUETOOTH;
+ la->l2_psm = bt_cb(skb)->psm;
+ bacpy(&la->l2_bdaddr, &bt_cb(skb)->bdaddr);
+
+ *msg_namelen = sizeof(struct sockaddr_l2);
+}
+
+static void l2cap_sock_init(struct sock *sk, struct sock *parent)
+{
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+
+ BT_DBG("sk %p", sk);
+
+ if (parent) {
+ struct l2cap_chan *pchan = l2cap_pi(parent)->chan;
+
+ sk->sk_type = parent->sk_type;
+ bt_sk(sk)->flags = bt_sk(parent)->flags;
+
+ chan->chan_type = pchan->chan_type;
+ chan->imtu = pchan->imtu;
+ chan->omtu = pchan->omtu;
+ chan->conf_state = pchan->conf_state;
+ chan->mode = pchan->mode;
+ chan->fcs = pchan->fcs;
+ chan->max_tx = pchan->max_tx;
+ chan->tx_win = pchan->tx_win;
+ chan->tx_win_max = pchan->tx_win_max;
+ chan->sec_level = pchan->sec_level;
+ chan->flags = pchan->flags;
+ chan->tx_credits = pchan->tx_credits;
+ chan->rx_credits = pchan->rx_credits;
+
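+ /* Fixed channels use the same CID at both ends, so the child
+ * inherits the listener's source CID as its destination CID too.
+ */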
+ if (chan->chan_type == L2CAP_CHAN_FIXED) {
+ chan->scid = pchan->scid;
+ chan->dcid = pchan->scid;
+ }
+
+ security_sk_clone(parent, sk);
+ } else {
+ switch (sk->sk_type) {
+ case SOCK_RAW:
+ chan->chan_type = L2CAP_CHAN_RAW;
+ break;
+ case SOCK_DGRAM:
+ chan->chan_type = L2CAP_CHAN_CONN_LESS;
+ bt_sk(sk)->skb_msg_name = l2cap_skb_msg_name;
+ break;
+ case SOCK_SEQPACKET:
+ case SOCK_STREAM:
+ chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
+ break;
+ }
+
+ chan->imtu = L2CAP_DEFAULT_MTU;
+ chan->omtu = 0;
+ if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
+ chan->mode = L2CAP_MODE_ERTM;
+ set_bit(CONF_STATE2_DEVICE, &chan->conf_state);
+ } else {
+ chan->mode = L2CAP_MODE_BASIC;
+ }
+
+ l2cap_chan_set_defaults(chan);
+ }
+
+ /* Default config options */
+ chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
+
+ chan->data = sk;
+ chan->ops = &l2cap_chan_ops;
+}
+
+static struct proto l2cap_proto = {
+ .name = "L2CAP",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct l2cap_pinfo)
+};
+
+static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
+ int proto, gfp_t prio)
+{
+ struct sock *sk;
+ struct l2cap_chan *chan;
+
+ sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
+ if (!sk)
+ return NULL;
+
+ sock_init_data(sock, sk);
+ INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
+
+ sk->sk_destruct = l2cap_sock_destruct;
+ sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
+
+ sock_reset_flag(sk, SOCK_ZAPPED);
+
+ sk->sk_protocol = proto;
+ sk->sk_state = BT_OPEN;
+
+ chan = l2cap_chan_create();
+ if (!chan) {
+ sk_free(sk);
+ return NULL;
+ }
+
+ l2cap_chan_hold(chan);
+
+ l2cap_pi(sk)->chan = chan;
+
+ return sk;
+}
+
+static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
+{
+ struct sock *sk;
+
+ BT_DBG("sock %p", sock);
+
+ sock->state = SS_UNCONNECTED;
+
+ if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
+ sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
+ return -ESOCKTNOSUPPORT;
+
+ if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
+ return -EPERM;
+
+ sock->ops = &l2cap_sock_ops;
+
+ sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
+ if (!sk)
+ return -ENOMEM;
+
+ l2cap_sock_init(sk, NULL);
+ bt_sock_link(&l2cap_sk_list, sk);
+ return 0;
+}
+
+static const struct proto_ops l2cap_sock_ops = {
+ .family = PF_BLUETOOTH,
+ .owner = THIS_MODULE,
+ .release = l2cap_sock_release,
+ .bind = l2cap_sock_bind,
+ .connect = l2cap_sock_connect,
+ .listen = l2cap_sock_listen,
+ .accept = l2cap_sock_accept,
+ .getname = l2cap_sock_getname,
+ .sendmsg = l2cap_sock_sendmsg,
+ .recvmsg = l2cap_sock_recvmsg,
+ .poll = bt_sock_poll,
+ .ioctl = bt_sock_ioctl,
+ .mmap = sock_no_mmap,
+ .socketpair = sock_no_socketpair,
+ .shutdown = l2cap_sock_shutdown,
+ .setsockopt = l2cap_sock_setsockopt,
+ .getsockopt = l2cap_sock_getsockopt
+};
+
+static const struct net_proto_family l2cap_sock_family_ops = {
+ .family = PF_BLUETOOTH,
+ .owner = THIS_MODULE,
+ .create = l2cap_sock_create,
+};
+
+int __init l2cap_init_sockets(void)
+{
+ int err;
+
+ err = proto_register(&l2cap_proto, 0);
+ if (err < 0)
+ return err;
+
+ err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
+ if (err < 0) {
+ BT_ERR("L2CAP socket registration failed");
+ goto error;
+ }
+
+ err = bt_procfs_init(&init_net, "l2cap", &l2cap_sk_list,
+ NULL);
+ if (err < 0) {
+ BT_ERR("Failed to create L2CAP proc file");
+ bt_sock_unregister(BTPROTO_L2CAP);
+ goto error;
+ }
+
+ BT_INFO("L2CAP socket layer initialized");
+
+ return 0;
+
+error:
+ proto_unregister(&l2cap_proto);
+ return err;
+}
+
+void l2cap_cleanup_sockets(void)
+{
+ bt_procfs_cleanup(&init_net, "l2cap");
+ bt_sock_unregister(BTPROTO_L2CAP);
+ proto_unregister(&l2cap_proto);
+}
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index b826d1bf10d..941ad7530ed 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -24,12 +24,9 @@
/* Bluetooth kernel library. */
-#include <linux/module.h>
+#define pr_fmt(fmt) "Bluetooth: " fmt
-#include <linux/kernel.h>
-#include <linux/stddef.h>
-#include <linux/string.h>
-#include <asm/errno.h>
+#include <linux/export.h>
#include <net/bluetooth/bluetooth.h>
@@ -44,22 +41,8 @@ void baswap(bdaddr_t *dst, bdaddr_t *src)
}
EXPORT_SYMBOL(baswap);
-char *batostr(bdaddr_t *ba)
-{
- static char str[2][18];
- static int i = 1;
-
- i ^= 1;
- sprintf(str[i], "%2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X",
- ba->b[5], ba->b[4], ba->b[3],
- ba->b[2], ba->b[1], ba->b[0]);
-
- return str[i];
-}
-EXPORT_SYMBOL(batostr);
-
/* Bluetooth error codes to Unix errno mapping */
-int bt_err(__u16 code)
+int bt_to_errno(__u16 code)
{
switch (code) {
case 0:
@@ -75,6 +58,7 @@ int bt_err(__u16 code)
return EIO;
case 0x04:
+ case 0x3c:
return EHOSTDOWN;
case 0x05:
@@ -149,4 +133,42 @@ int bt_err(__u16 code)
return ENOSYS;
}
}
+EXPORT_SYMBOL(bt_to_errno);
+
+int bt_info(const char *format, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ int r;
+
+ va_start(args, format);
+
+ vaf.fmt = format;
+ vaf.va = &args;
+
+ r = pr_info("%pV", &vaf);
+
+ va_end(args);
+
+ return r;
+}
+EXPORT_SYMBOL(bt_info);
+
+int bt_err(const char *format, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ int r;
+
+ va_start(args, format);
+
+ vaf.fmt = format;
+ vaf.va = &args;
+
+ r = pr_err("%pV", &vaf);
+
+ va_end(args);
+
+ return r;
+}
EXPORT_SYMBOL(bt_err);
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index f827fd90838..af8e0a6243b 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -1,6 +1,8 @@
/*
BlueZ - Bluetooth protocol stack for Linux
+
Copyright (C) 2010 Nokia Corporation
+ Copyright (C) 2011-2012 Intel Corporation
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
@@ -22,199 +24,4839 @@
/* Bluetooth HCI Management interface */
-#include <asm/uaccess.h>
+#include <linux/module.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>
-#define MGMT_VERSION 0
-#define MGMT_REVISION 1
+#include "smp.h"
+
+#define MGMT_VERSION 1
+#define MGMT_REVISION 6
+
+static const u16 mgmt_commands[] = {
+ MGMT_OP_READ_INDEX_LIST,
+ MGMT_OP_READ_INFO,
+ MGMT_OP_SET_POWERED,
+ MGMT_OP_SET_DISCOVERABLE,
+ MGMT_OP_SET_CONNECTABLE,
+ MGMT_OP_SET_FAST_CONNECTABLE,
+ MGMT_OP_SET_PAIRABLE,
+ MGMT_OP_SET_LINK_SECURITY,
+ MGMT_OP_SET_SSP,
+ MGMT_OP_SET_HS,
+ MGMT_OP_SET_LE,
+ MGMT_OP_SET_DEV_CLASS,
+ MGMT_OP_SET_LOCAL_NAME,
+ MGMT_OP_ADD_UUID,
+ MGMT_OP_REMOVE_UUID,
+ MGMT_OP_LOAD_LINK_KEYS,
+ MGMT_OP_LOAD_LONG_TERM_KEYS,
+ MGMT_OP_DISCONNECT,
+ MGMT_OP_GET_CONNECTIONS,
+ MGMT_OP_PIN_CODE_REPLY,
+ MGMT_OP_PIN_CODE_NEG_REPLY,
+ MGMT_OP_SET_IO_CAPABILITY,
+ MGMT_OP_PAIR_DEVICE,
+ MGMT_OP_CANCEL_PAIR_DEVICE,
+ MGMT_OP_UNPAIR_DEVICE,
+ MGMT_OP_USER_CONFIRM_REPLY,
+ MGMT_OP_USER_CONFIRM_NEG_REPLY,
+ MGMT_OP_USER_PASSKEY_REPLY,
+ MGMT_OP_USER_PASSKEY_NEG_REPLY,
+ MGMT_OP_READ_LOCAL_OOB_DATA,
+ MGMT_OP_ADD_REMOTE_OOB_DATA,
+ MGMT_OP_REMOVE_REMOTE_OOB_DATA,
+ MGMT_OP_START_DISCOVERY,
+ MGMT_OP_STOP_DISCOVERY,
+ MGMT_OP_CONFIRM_NAME,
+ MGMT_OP_BLOCK_DEVICE,
+ MGMT_OP_UNBLOCK_DEVICE,
+ MGMT_OP_SET_DEVICE_ID,
+ MGMT_OP_SET_ADVERTISING,
+ MGMT_OP_SET_BREDR,
+ MGMT_OP_SET_STATIC_ADDRESS,
+ MGMT_OP_SET_SCAN_PARAMS,
+ MGMT_OP_SET_SECURE_CONN,
+ MGMT_OP_SET_DEBUG_KEYS,
+ MGMT_OP_SET_PRIVACY,
+ MGMT_OP_LOAD_IRKS,
+ MGMT_OP_GET_CONN_INFO,
+};
+
+static const u16 mgmt_events[] = {
+ MGMT_EV_CONTROLLER_ERROR,
+ MGMT_EV_INDEX_ADDED,
+ MGMT_EV_INDEX_REMOVED,
+ MGMT_EV_NEW_SETTINGS,
+ MGMT_EV_CLASS_OF_DEV_CHANGED,
+ MGMT_EV_LOCAL_NAME_CHANGED,
+ MGMT_EV_NEW_LINK_KEY,
+ MGMT_EV_NEW_LONG_TERM_KEY,
+ MGMT_EV_DEVICE_CONNECTED,
+ MGMT_EV_DEVICE_DISCONNECTED,
+ MGMT_EV_CONNECT_FAILED,
+ MGMT_EV_PIN_CODE_REQUEST,
+ MGMT_EV_USER_CONFIRM_REQUEST,
+ MGMT_EV_USER_PASSKEY_REQUEST,
+ MGMT_EV_AUTH_FAILED,
+ MGMT_EV_DEVICE_FOUND,
+ MGMT_EV_DISCOVERING,
+ MGMT_EV_DEVICE_BLOCKED,
+ MGMT_EV_DEVICE_UNBLOCKED,
+ MGMT_EV_DEVICE_UNPAIRED,
+ MGMT_EV_PASSKEY_NOTIFY,
+ MGMT_EV_NEW_IRK,
+ MGMT_EV_NEW_CSRK,
+};
+
+#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
+
+#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
+ !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+
+struct pending_cmd {
+ struct list_head list;
+ u16 opcode;
+ int index;
+ void *param;
+ struct sock *sk;
+ void *user_data;
+};
+
+/* HCI to MGMT error code conversion table */
+static u8 mgmt_status_table[] = {
+ MGMT_STATUS_SUCCESS,
+ MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
+ MGMT_STATUS_NOT_CONNECTED, /* No Connection */
+ MGMT_STATUS_FAILED, /* Hardware Failure */
+ MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
+ MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
+ MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
+ MGMT_STATUS_NO_RESOURCES, /* Memory Full */
+ MGMT_STATUS_TIMEOUT, /* Connection Timeout */
+ MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
+ MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
+ MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
+ MGMT_STATUS_BUSY, /* Command Disallowed */
+ MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
+ MGMT_STATUS_REJECTED, /* Rejected Security */
+ MGMT_STATUS_REJECTED, /* Rejected Personal */
+ MGMT_STATUS_TIMEOUT, /* Host Timeout */
+ MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
+ MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
+ MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
+ MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
+ MGMT_STATUS_DISCONNECTED, /* OE Power Off */
+ MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
+ MGMT_STATUS_BUSY, /* Repeated Attempts */
+ MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
+ MGMT_STATUS_FAILED, /* Unknown LMP PDU */
+ MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
+ MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
+ MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
+ MGMT_STATUS_REJECTED, /* Air Mode Rejected */
+ MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
+ MGMT_STATUS_FAILED, /* Unspecified Error */
+ MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
+ MGMT_STATUS_FAILED, /* Role Change Not Allowed */
+ MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
+ MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
+ MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
+ MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
+ MGMT_STATUS_FAILED, /* Unit Link Key Used */
+ MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
+ MGMT_STATUS_TIMEOUT, /* Instant Passed */
+ MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
+ MGMT_STATUS_FAILED, /* Transaction Collision */
+ MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
+ MGMT_STATUS_REJECTED, /* QoS Rejected */
+ MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
+ MGMT_STATUS_REJECTED, /* Insufficient Security */
+ MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
+ MGMT_STATUS_BUSY, /* Role Switch Pending */
+ MGMT_STATUS_FAILED, /* Slot Violation */
+ MGMT_STATUS_FAILED, /* Role Switch Failed */
+ MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
+ MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
+ MGMT_STATUS_BUSY, /* Host Busy Pairing */
+ MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
+ MGMT_STATUS_BUSY, /* Controller Busy */
+ MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
+ MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
+ MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
+ MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
+ MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
+};
-static int cmd_status(struct sock *sk, u16 cmd, u8 status)
+static u8 mgmt_status(u8 hci_status)
+{
+ if (hci_status < ARRAY_SIZE(mgmt_status_table))
+ return mgmt_status_table[hci_status];
+
+ return MGMT_STATUS_FAILED;
+}
+
+static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
struct sk_buff *skb;
struct mgmt_hdr *hdr;
struct mgmt_ev_cmd_status *ev;
+ int err;
- BT_DBG("sock %p", sk);
+ BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
- skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_ATOMIC);
+ skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
if (!skb)
return -ENOMEM;
hdr = (void *) skb_put(skb, sizeof(*hdr));
hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
+ hdr->index = cpu_to_le16(index);
hdr->len = cpu_to_le16(sizeof(*ev));
ev = (void *) skb_put(skb, sizeof(*ev));
ev->status = status;
- put_unaligned_le16(cmd, &ev->opcode);
+ ev->opcode = cpu_to_le16(cmd);
- if (sock_queue_rcv_skb(sk, skb) < 0)
+ err = sock_queue_rcv_skb(sk, skb);
+ if (err < 0)
kfree_skb(skb);
- return 0;
+ return err;
}
-static int read_version(struct sock *sk)
+static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
+ void *rp, size_t rp_len)
{
struct sk_buff *skb;
struct mgmt_hdr *hdr;
struct mgmt_ev_cmd_complete *ev;
- struct mgmt_rp_read_version *rp;
+ int err;
BT_DBG("sock %p", sk);
- skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + sizeof(*rp), GFP_ATOMIC);
+ skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
if (!skb)
return -ENOMEM;
hdr = (void *) skb_put(skb, sizeof(*hdr));
+
hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
- hdr->len = cpu_to_le16(sizeof(*ev) + sizeof(*rp));
+ hdr->index = cpu_to_le16(index);
+ hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
- ev = (void *) skb_put(skb, sizeof(*ev));
- put_unaligned_le16(MGMT_OP_READ_VERSION, &ev->opcode);
+ ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
+ ev->opcode = cpu_to_le16(cmd);
+ ev->status = status;
- rp = (void *) skb_put(skb, sizeof(*rp));
- rp->version = MGMT_VERSION;
- put_unaligned_le16(MGMT_REVISION, &rp->revision);
+ if (rp)
+ memcpy(ev->data, rp, rp_len);
- if (sock_queue_rcv_skb(sk, skb) < 0)
+ err = sock_queue_rcv_skb(sk, skb);
+ if (err < 0)
kfree_skb(skb);
- return 0;
+ return err;
}
-static int read_index_list(struct sock *sk)
+static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 data_len)
+{
+ struct mgmt_rp_read_version rp;
+
+ BT_DBG("sock %p", sk);
+
+ rp.version = MGMT_VERSION;
+ rp.revision = cpu_to_le16(MGMT_REVISION);
+
+ return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
+ sizeof(rp));
+}
+
+static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 data_len)
+{
+ struct mgmt_rp_read_commands *rp;
+ const u16 num_commands = ARRAY_SIZE(mgmt_commands);
+ const u16 num_events = ARRAY_SIZE(mgmt_events);
+ __le16 *opcode;
+ size_t rp_size;
+ int i, err;
+
+ BT_DBG("sock %p", sk);
+
+ rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
+
+ rp = kmalloc(rp_size, GFP_KERNEL);
+ if (!rp)
+ return -ENOMEM;
+
+ rp->num_commands = cpu_to_le16(num_commands);
+ rp->num_events = cpu_to_le16(num_events);
+
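+ /* The opcode cursor runs over both lists back to back: the
+ * supported commands first, immediately followed by the events.
+ */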
+ for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
+ put_unaligned_le16(mgmt_commands[i], opcode);
+
+ for (i = 0; i < num_events; i++, opcode++)
+ put_unaligned_le16(mgmt_events[i], opcode);
+
+ err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
+ rp_size);
+ kfree(rp);
+
+ return err;
+}
+
+static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 data_len)
{
- struct sk_buff *skb;
- struct mgmt_hdr *hdr;
- struct mgmt_ev_cmd_complete *ev;
struct mgmt_rp_read_index_list *rp;
- struct list_head *p;
- size_t body_len;
+ struct hci_dev *d;
+ size_t rp_len;
u16 count;
- int i;
+ int err;
BT_DBG("sock %p", sk);
read_lock(&hci_dev_list_lock);
count = 0;
- list_for_each(p, &hci_dev_list) {
- count++;
+ list_for_each_entry(d, &hci_dev_list, list) {
+ if (d->dev_type == HCI_BREDR)
+ count++;
}
- body_len = sizeof(*ev) + sizeof(*rp) + (2 * count);
- skb = alloc_skb(sizeof(*hdr) + body_len, GFP_ATOMIC);
- if (!skb)
+ rp_len = sizeof(*rp) + (2 * count);
+ rp = kmalloc(rp_len, GFP_ATOMIC);
+ if (!rp) {
+ read_unlock(&hci_dev_list_lock);
return -ENOMEM;
+ }
- hdr = (void *) skb_put(skb, sizeof(*hdr));
- hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
- hdr->len = cpu_to_le16(body_len);
-
- ev = (void *) skb_put(skb, sizeof(*ev));
- put_unaligned_le16(MGMT_OP_READ_INDEX_LIST, &ev->opcode);
+ count = 0;
+ list_for_each_entry(d, &hci_dev_list, list) {
+ if (test_bit(HCI_SETUP, &d->dev_flags))
+ continue;
- rp = (void *) skb_put(skb, sizeof(*rp) + (2 * count));
- put_unaligned_le16(count, &rp->num_controllers);
+ if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
+ continue;
- i = 0;
- list_for_each(p, &hci_dev_list) {
- struct hci_dev *d = list_entry(p, struct hci_dev, list);
- put_unaligned_le16(d->id, &rp->index[i++]);
- BT_DBG("Added hci%u", d->id);
+ if (d->dev_type == HCI_BREDR) {
+ rp->index[count++] = cpu_to_le16(d->id);
+ BT_DBG("Added hci%u", d->id);
+ }
}
+ rp->num_controllers = cpu_to_le16(count);
+ rp_len = sizeof(*rp) + (2 * count);
+
read_unlock(&hci_dev_list_lock);
- if (sock_queue_rcv_skb(sk, skb) < 0)
- kfree_skb(skb);
+ err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
+ rp_len);
+
+ kfree(rp);
+
+ return err;
+}
+
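+/* Report every setting the controller could support based on its
+ * LMP features, independent of what is currently enabled.
+ */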
+static u32 get_supported_settings(struct hci_dev *hdev)
+{
+ u32 settings = 0;
+
+ settings |= MGMT_SETTING_POWERED;
+ settings |= MGMT_SETTING_PAIRABLE;
+ settings |= MGMT_SETTING_DEBUG_KEYS;
+
+ if (lmp_bredr_capable(hdev)) {
+ settings |= MGMT_SETTING_CONNECTABLE;
+ if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
+ settings |= MGMT_SETTING_FAST_CONNECTABLE;
+ settings |= MGMT_SETTING_DISCOVERABLE;
+ settings |= MGMT_SETTING_BREDR;
+ settings |= MGMT_SETTING_LINK_SECURITY;
+
+ if (lmp_ssp_capable(hdev)) {
+ settings |= MGMT_SETTING_SSP;
+ settings |= MGMT_SETTING_HS;
+ }
+
+ if (lmp_sc_capable(hdev) ||
+ test_bit(HCI_FORCE_SC, &hdev->dev_flags))
+ settings |= MGMT_SETTING_SECURE_CONN;
+ }
+
+ if (lmp_le_capable(hdev)) {
+ settings |= MGMT_SETTING_LE;
+ settings |= MGMT_SETTING_ADVERTISING;
+ settings |= MGMT_SETTING_PRIVACY;
+ }
+
+ return settings;
+}
+
+static u32 get_current_settings(struct hci_dev *hdev)
+{
+ u32 settings = 0;
+
+ if (hdev_is_powered(hdev))
+ settings |= MGMT_SETTING_POWERED;
+
+ if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+ settings |= MGMT_SETTING_CONNECTABLE;
+
+ if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
+ settings |= MGMT_SETTING_FAST_CONNECTABLE;
+
+ if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
+ settings |= MGMT_SETTING_DISCOVERABLE;
+
+ if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
+ settings |= MGMT_SETTING_PAIRABLE;
+
+ if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+ settings |= MGMT_SETTING_BREDR;
+
+ if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+ settings |= MGMT_SETTING_LE;
+
+ if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
+ settings |= MGMT_SETTING_LINK_SECURITY;
+
+ if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
+ settings |= MGMT_SETTING_SSP;
+
+ if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
+ settings |= MGMT_SETTING_HS;
+
+ if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+ settings |= MGMT_SETTING_ADVERTISING;
+
+ if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
+ settings |= MGMT_SETTING_SECURE_CONN;
+
+ if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
+ settings |= MGMT_SETTING_DEBUG_KEYS;
+
+ if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
+ settings |= MGMT_SETTING_PRIVACY;
+
+ return settings;
+}
+
+#define PNP_INFO_SVCLASS_ID 0x1200
+
+static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
+{
+ u8 *ptr = data, *uuids_start = NULL;
+ struct bt_uuid *uuid;
+
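+ /* Need room for the two-byte EIR header plus at least one
+ * 16-bit UUID.
+ */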
+ if (len < 4)
+ return ptr;
+
+ list_for_each_entry(uuid, &hdev->uuids, list) {
+ u16 uuid16;
+
+ if (uuid->size != 16)
+ continue;
+
+ uuid16 = get_unaligned_le16(&uuid->uuid[12]);
+ if (uuid16 < 0x1100)
+ continue;
+
+ if (uuid16 == PNP_INFO_SVCLASS_ID)
+ continue;
+
+ if (!uuids_start) {
+ uuids_start = ptr;
+ uuids_start[0] = 1;
+ uuids_start[1] = EIR_UUID16_ALL;
+ ptr += 2;
+ }
+
+ /* Stop if not enough space to put next UUID */
+ if ((ptr - data) + sizeof(u16) > len) {
+ uuids_start[1] = EIR_UUID16_SOME;
+ break;
+ }
+
+ *ptr++ = (uuid16 & 0x00ff);
+ *ptr++ = (uuid16 & 0xff00) >> 8;
+ uuids_start[0] += sizeof(uuid16);
+ }
+
+ return ptr;
+}
+
+static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
+{
+ u8 *ptr = data, *uuids_start = NULL;
+ struct bt_uuid *uuid;
+
+ if (len < 6)
+ return ptr;
+
+ list_for_each_entry(uuid, &hdev->uuids, list) {
+ if (uuid->size != 32)
+ continue;
+
+ if (!uuids_start) {
+ uuids_start = ptr;
+ uuids_start[0] = 1;
+ uuids_start[1] = EIR_UUID32_ALL;
+ ptr += 2;
+ }
+
+ /* Stop if not enough space to put next UUID */
+ if ((ptr - data) + sizeof(u32) > len) {
+ uuids_start[1] = EIR_UUID32_SOME;
+ break;
+ }
+
+ memcpy(ptr, &uuid->uuid[12], sizeof(u32));
+ ptr += sizeof(u32);
+ uuids_start[0] += sizeof(u32);
+ }
+
+ return ptr;
+}
+
+static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
+{
+ u8 *ptr = data, *uuids_start = NULL;
+ struct bt_uuid *uuid;
+
+ if (len < 18)
+ return ptr;
+
+ list_for_each_entry(uuid, &hdev->uuids, list) {
+ if (uuid->size != 128)
+ continue;
+
+ if (!uuids_start) {
+ uuids_start = ptr;
+ uuids_start[0] = 1;
+ uuids_start[1] = EIR_UUID128_ALL;
+ ptr += 2;
+ }
+
+ /* Stop if not enough space to put next UUID */
+ if ((ptr - data) + 16 > len) {
+ uuids_start[1] = EIR_UUID128_SOME;
+ break;
+ }
+
+ memcpy(ptr, uuid->uuid, 16);
+ ptr += 16;
+ uuids_start[0] += 16;
+ }
+
+ return ptr;
+}
+
+static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
+{
+ struct pending_cmd *cmd;
+
+ list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
+ if (cmd->opcode == opcode)
+ return cmd;
+ }
+
+ return NULL;
+}
+
+static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
+{
+ u8 ad_len = 0;
+ size_t name_len;
+
+ name_len = strlen(hdev->dev_name);
+ if (name_len > 0) {
+ size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
+
+ if (name_len > max_len) {
+ name_len = max_len;
+ ptr[1] = EIR_NAME_SHORT;
+ } else
+ ptr[1] = EIR_NAME_COMPLETE;
+
+ ptr[0] = name_len + 1;
+
+ memcpy(ptr + 2, hdev->dev_name, name_len);
+
+ ad_len += (name_len + 2);
+ ptr += (name_len + 2);
+ }
+
+ return ad_len;
+}
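
create_scan_rsp_data() above applies the standard name-shortening rule: if the full name does not fit, the element is emitted with the Shortened Local Name type instead of Complete Local Name. A small standalone sketch of that rule (userspace C; 0x08/0x09 are the spec-defined AD types and 31 is the legacy advertising payload limit, HCI_MAX_AD_LENGTH):

/* Userspace sketch (not kernel code): emit a Local Name AD element,
 * shortening it when the remaining space cannot hold the full name.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define AD_NAME_SHORT		0x08
#define AD_NAME_COMPLETE	0x09
#define MAX_AD_LENGTH		31

static size_t put_name_ad(uint8_t *buf, size_t space, const char *name)
{
	size_t name_len = strlen(name);
	size_t max_len;

	if (space < 3 || name_len == 0)
		return 0;

	max_len = space - 2;		/* minus length + type octets */

	if (name_len > max_len) {
		name_len = max_len;
		buf[1] = AD_NAME_SHORT;
	} else {
		buf[1] = AD_NAME_COMPLETE;
	}

	buf[0] = name_len + 1;		/* type octet + name bytes */
	memcpy(buf + 2, name, name_len);

	return name_len + 2;
}

int main(void)
{
	uint8_t ad[MAX_AD_LENGTH];
	size_t len = put_name_ad(ad, sizeof(ad),
				 "my-very-long-bluetooth-controller-name");

	printf("emitted %zu bytes, type 0x%02x\n", len, ad[1]);
	return 0;
}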
+
+static void update_scan_rsp_data(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct hci_cp_le_set_scan_rsp_data cp;
+ u8 len;
+
+ if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+ return;
+
+ memset(&cp, 0, sizeof(cp));
+
+ len = create_scan_rsp_data(hdev, cp.data);
+
+ if (hdev->scan_rsp_data_len == len &&
+ memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
+ return;
+
+ memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
+ hdev->scan_rsp_data_len = len;
+
+ cp.length = len;
+
+ hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
+}
+
+static u8 get_adv_discov_flags(struct hci_dev *hdev)
+{
+ struct pending_cmd *cmd;
+
+ /* If there's a pending mgmt command the flags will not yet have
+ * their final values, so check for this first.
+ */
+ cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
+ if (cmd) {
+ struct mgmt_mode *cp = cmd->param;
+ if (cp->val == 0x01)
+ return LE_AD_GENERAL;
+ else if (cp->val == 0x02)
+ return LE_AD_LIMITED;
+ } else {
+ if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
+ return LE_AD_LIMITED;
+ else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
+ return LE_AD_GENERAL;
+ }
return 0;
}
-static int read_controller_info(struct sock *sk, unsigned char *data, u16 len)
+static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
- struct sk_buff *skb;
- struct mgmt_hdr *hdr;
- struct mgmt_ev_cmd_complete *ev;
- struct mgmt_rp_read_info *rp;
- struct mgmt_cp_read_info *cp;
- struct hci_dev *hdev;
- u16 dev_id;
+ u8 ad_len = 0, flags = 0;
- BT_DBG("sock %p", sk);
+ flags |= get_adv_discov_flags(hdev);
+
+ if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+ flags |= LE_AD_NO_BREDR;
+
+ if (flags) {
+ BT_DBG("adv flags 0x%02x", flags);
+
+ ptr[0] = 2;
+ ptr[1] = EIR_FLAGS;
+ ptr[2] = flags;
+
+ ad_len += 3;
+ ptr += 3;
+ }
+
+ if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
+ ptr[0] = 2;
+ ptr[1] = EIR_TX_POWER;
+ ptr[2] = (u8) hdev->adv_tx_power;
+
+ ad_len += 3;
+ ptr += 3;
+ }
+
+ return ad_len;
+}
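
The flags octet composed above uses the Core Specification bit assignments: bit 0 limited discoverable, bit 1 general discoverable, bit 2 BR/EDR not supported. A quick standalone sketch (userspace C):

/* Userspace sketch: compose the Flags AD octet as create_adv_data()
 * does.  Bit values per the Core Specification Flags definition.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define LE_AD_LIMITED	0x01
#define LE_AD_GENERAL	0x02
#define LE_AD_NO_BREDR	0x04

static uint8_t adv_flags(bool limited, bool general, bool bredr_enabled)
{
	uint8_t flags = 0;

	if (limited)
		flags |= LE_AD_LIMITED;	/* limited wins over general */
	else if (general)
		flags |= LE_AD_GENERAL;

	if (!bredr_enabled)
		flags |= LE_AD_NO_BREDR;

	return flags;
}

int main(void)
{
	/* LE-only controller in general discoverable mode -> 0x06 */
	printf("flags 0x%02x\n", adv_flags(false, true, false));
	return 0;
}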
+
+static void update_adv_data(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct hci_cp_le_set_adv_data cp;
+ u8 len;
+
+ if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+ return;
+
+ memset(&cp, 0, sizeof(cp));
+
+ len = create_adv_data(hdev, cp.data);
+
+ if (hdev->adv_data_len == len &&
+ memcmp(cp.data, hdev->adv_data, len) == 0)
+ return;
+
+ memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
+ hdev->adv_data_len = len;
+
+ cp.length = len;
+
+ hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
+}
+
+static void create_eir(struct hci_dev *hdev, u8 *data)
+{
+ u8 *ptr = data;
+ size_t name_len;
+
+ name_len = strlen(hdev->dev_name);
+
+ if (name_len > 0) {
+ /* EIR Data type */
+ if (name_len > 48) {
+ name_len = 48;
+ ptr[1] = EIR_NAME_SHORT;
+ } else
+ ptr[1] = EIR_NAME_COMPLETE;
+
+ /* EIR Data length */
+ ptr[0] = name_len + 1;
+
+ memcpy(ptr + 2, hdev->dev_name, name_len);
+
+ ptr += (name_len + 2);
+ }
+
+ if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
+ ptr[0] = 2;
+ ptr[1] = EIR_TX_POWER;
+ ptr[2] = (u8) hdev->inq_tx_power;
- if (len != 2)
- return cmd_status(sk, MGMT_OP_READ_INFO, EINVAL);
+ ptr += 3;
+ }
+
+ if (hdev->devid_source > 0) {
+ ptr[0] = 9;
+ ptr[1] = EIR_DEVICE_ID;
+
+ put_unaligned_le16(hdev->devid_source, ptr + 2);
+ put_unaligned_le16(hdev->devid_vendor, ptr + 4);
+ put_unaligned_le16(hdev->devid_product, ptr + 6);
+ put_unaligned_le16(hdev->devid_version, ptr + 8);
+
+ ptr += 10;
+ }
+
+ ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
+ ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
+ ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
+}
+
+static void update_eir(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct hci_cp_write_eir cp;
+
+ if (!hdev_is_powered(hdev))
+ return;
+
+ if (!lmp_ext_inq_capable(hdev))
+ return;
+
+ if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
+ return;
+
+ if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
+ return;
+
+ memset(&cp, 0, sizeof(cp));
+
+ create_eir(hdev, cp.data);
+
+ if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
+ return;
+
+ memcpy(hdev->eir, cp.data, sizeof(cp.data));
+
+ hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
+}
+
+static u8 get_service_classes(struct hci_dev *hdev)
+{
+ struct bt_uuid *uuid;
+ u8 val = 0;
+
+ list_for_each_entry(uuid, &hdev->uuids, list)
+ val |= uuid->svc_hint;
+
+ return val;
+}
+
+static void update_class(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ u8 cod[3];
+
+ BT_DBG("%s", hdev->name);
+
+ if (!hdev_is_powered(hdev))
+ return;
+
+ if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+ return;
+
+ if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
+ return;
+
+ cod[0] = hdev->minor_class;
+ cod[1] = hdev->major_class;
+ cod[2] = get_service_classes(hdev);
+
+ if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
+ cod[1] |= 0x20;
+
+ if (memcmp(cod, hdev->dev_class, 3) == 0)
+ return;
+
+ hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
+}
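
For reference, the Class of Device written above is a 24-bit little-endian field: byte 0 carries the minor class, byte 1 the major class, byte 2 the service class hints; the 0x20 ORed into byte 1 is CoD bit 13, the Limited Discoverable Mode service class bit. A standalone packing sketch (userspace C; the example major/minor values are illustrative):

/* Userspace sketch: pack the 3-byte Class of Device the way
 * update_class() does.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

static void pack_cod(uint8_t cod[3], uint8_t major, uint8_t minor,
		     uint8_t svc_hints, bool limited)
{
	cod[0] = minor;		/* minor class */
	cod[1] = major;		/* major class in bits 0-4 */
	cod[2] = svc_hints;	/* ORed service class hints */

	if (limited)
		cod[1] |= 0x20;	/* Limited Discoverable Mode bit */
}

int main(void)
{
	uint8_t cod[3];

	/* computer/laptop (illustrative) with networking service hint */
	pack_cod(cod, 0x01, 0x0c, 0x02, true);
	printf("CoD: 0x%02x%02x%02x\n", cod[2], cod[1], cod[0]);
	return 0;
}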
+
+static bool get_connectable(struct hci_dev *hdev)
+{
+ struct pending_cmd *cmd;
+
+ /* If there's a pending mgmt command the flag will not yet have
+	 * its final value, so check for this first.
+ */
+ cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
+ if (cmd) {
+ struct mgmt_mode *cp = cmd->param;
+ return cp->val;
+ }
+
+ return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+}
+
+static void enable_advertising(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct hci_cp_le_set_adv_param cp;
+ u8 own_addr_type, enable = 0x01;
+ bool connectable;
+
+ /* Clear the HCI_ADVERTISING bit temporarily so that the
+ * hci_update_random_address knows that it's safe to go ahead
+ * and write a new random address. The flag will be set back on
+ * as soon as the SET_ADV_ENABLE HCI command completes.
+ */
+ clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+
+ connectable = get_connectable(hdev);
+
+ /* Set require_privacy to true only when non-connectable
+ * advertising is used. In that case it is fine to use a
+ * non-resolvable private address.
+ */
+ if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
+ return;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.min_interval = cpu_to_le16(0x0800);
+ cp.max_interval = cpu_to_le16(0x0800);
+ cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
+ cp.own_address_type = own_addr_type;
+ cp.channel_map = hdev->le_adv_channel_map;
+
+ hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
+
+ hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
+}
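
The 0x0800 intervals above are in 0.625 ms units, i.e. 1.28 s, and the advertising type toggles between connectable undirected (ADV_IND, 0x00) and non-connectable undirected (ADV_NONCONN_IND, 0x03). A standalone sketch of those conversions (userspace C):

/* Userspace sketch: the fixed parameters enable_advertising() programs.
 * Intervals are in 0.625 ms units; 0x00/0x03 are the HCI advertising
 * type values for ADV_IND and ADV_NONCONN_IND.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define ADV_IND		0x00
#define ADV_NONCONN_IND	0x03

int main(void)
{
	uint16_t interval = 0x0800;
	bool connectable = true;

	printf("interval: %u units = %.2f ms\n",
	       interval, interval * 0.625);
	printf("adv type: 0x%02x\n",
	       connectable ? ADV_IND : ADV_NONCONN_IND);
	return 0;
}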
+
+static void disable_advertising(struct hci_request *req)
+{
+ u8 enable = 0x00;
+
+ hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
+}
+
+static void service_cache_off(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ service_cache.work);
+ struct hci_request req;
+
+ if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
+ return;
+
+ hci_req_init(&req, hdev);
+
+ hci_dev_lock(hdev);
+
+ update_eir(&req);
+ update_class(&req);
+
+ hci_dev_unlock(hdev);
+
+ hci_req_run(&req, NULL);
+}
- skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + sizeof(*rp), GFP_ATOMIC);
+static void rpa_expired(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ rpa_expired.work);
+ struct hci_request req;
+
+ BT_DBG("");
+
+ set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
+
+ if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
+ hci_conn_num(hdev, LE_LINK) > 0)
+ return;
+
+	/* Generating a new RPA and programming it into the
+	 * controller happen in the enable_advertising() function.
+ */
+
+ hci_req_init(&req, hdev);
+
+ disable_advertising(&req);
+ enable_advertising(&req);
+
+ hci_req_run(&req, NULL);
+}
+
+static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
+{
+ if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
+ return;
+
+ INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
+ INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
+
+	/* Non-mgmt controlled devices get this bit set
+	 * implicitly so that pairing works for them; however,
+	 * for mgmt we require user-space to explicitly
+	 * enable it.
+	 */
+ clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
+}
+
+static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 data_len)
+{
+ struct mgmt_rp_read_info rp;
+
+ BT_DBG("sock %p %s", sk, hdev->name);
+
+ hci_dev_lock(hdev);
+
+ memset(&rp, 0, sizeof(rp));
+
+ bacpy(&rp.bdaddr, &hdev->bdaddr);
+
+ rp.version = hdev->hci_ver;
+ rp.manufacturer = cpu_to_le16(hdev->manufacturer);
+
+ rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
+ rp.current_settings = cpu_to_le32(get_current_settings(hdev));
+
+ memcpy(rp.dev_class, hdev->dev_class, 3);
+
+ memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
+ memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
+
+ hci_dev_unlock(hdev);
+
+ return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
+ sizeof(rp));
+}
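
For context, the other end of this handler is a mgmt Read Controller Information command sent over the HCI control channel. A hedged userspace sketch follows; the constants mirror the mgmt API of this era (opcode 0x0004, control channel 3, a 6-byte little-endian opcode/index/len header), opening the channel typically needs CAP_NET_ADMIN, and error handling is minimal:

/* Userspace sketch (assumes Linux with this mgmt API): send
 * MGMT_OP_READ_INFO on the HCI control channel and read the raw
 * reply, which arrives as a Command Complete mgmt event.
 */
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>

#define BTPROTO_HCI		1
#define HCI_CHANNEL_CONTROL	3
#define HCI_DEV_NONE		0xffff

struct sockaddr_hci {
	sa_family_t	hci_family;
	unsigned short	hci_dev;
	unsigned short	hci_channel;
};

int main(void)
{
	struct sockaddr_hci addr = {
		.hci_family = AF_BLUETOOTH,
		.hci_dev = HCI_DEV_NONE,
		.hci_channel = HCI_CHANNEL_CONTROL,
	};
	/* opcode 0x0004 (READ_INFO), index 0 (hci0), param len 0 */
	uint8_t hdr[6] = { 0x04, 0x00, 0x00, 0x00, 0x00, 0x00 };
	uint8_t buf[512];
	ssize_t n;
	int sk;

	sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
	if (sk < 0 || bind(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	if (write(sk, hdr, sizeof(hdr)) != sizeof(hdr))
		return 1;

	n = read(sk, buf, sizeof(buf));	/* cmd complete event + rp */
	printf("got %zd bytes\n", n);

	close(sk);
	return 0;
}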
+
+static void mgmt_pending_free(struct pending_cmd *cmd)
+{
+ sock_put(cmd->sk);
+ kfree(cmd->param);
+ kfree(cmd);
+}
+
+static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
+ struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct pending_cmd *cmd;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return NULL;
+
+ cmd->opcode = opcode;
+ cmd->index = hdev->id;
+
+ cmd->param = kmalloc(len, GFP_KERNEL);
+ if (!cmd->param) {
+ kfree(cmd);
+ return NULL;
+ }
+
+ if (data)
+ memcpy(cmd->param, data, len);
+
+ cmd->sk = sk;
+ sock_hold(sk);
+
+ list_add(&cmd->list, &hdev->mgmt_pending);
+
+ return cmd;
+}
+
+static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
+ void (*cb)(struct pending_cmd *cmd,
+ void *data),
+ void *data)
+{
+ struct pending_cmd *cmd, *tmp;
+
+ list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
+ if (opcode > 0 && cmd->opcode != opcode)
+ continue;
+
+ cb(cmd, data);
+ }
+}
+
+static void mgmt_pending_remove(struct pending_cmd *cmd)
+{
+ list_del(&cmd->list);
+ mgmt_pending_free(cmd);
+}
+
+static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
+{
+ __le32 settings = cpu_to_le32(get_current_settings(hdev));
+
+ return cmd_complete(sk, hdev->id, opcode, 0, &settings,
+ sizeof(settings));
+}
+
+static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
+{
+ BT_DBG("%s status 0x%02x", hdev->name, status);
+
+ if (hci_conn_count(hdev) == 0) {
+ cancel_delayed_work(&hdev->power_off);
+ queue_work(hdev->req_workqueue, &hdev->power_off.work);
+ }
+}
+
+static void hci_stop_discovery(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct hci_cp_remote_name_req_cancel cp;
+ struct inquiry_entry *e;
+
+ switch (hdev->discovery.state) {
+ case DISCOVERY_FINDING:
+ if (test_bit(HCI_INQUIRY, &hdev->flags)) {
+ hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
+ } else {
+ cancel_delayed_work(&hdev->le_scan_disable);
+ hci_req_add_le_scan_disable(req);
+ }
+
+ break;
+
+ case DISCOVERY_RESOLVING:
+ e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
+ NAME_PENDING);
+ if (!e)
+ return;
+
+ bacpy(&cp.bdaddr, &e->data.bdaddr);
+ hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
+ &cp);
+
+ break;
+
+ default:
+ /* Passive scanning */
+ if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+ hci_req_add_le_scan_disable(req);
+ break;
+ }
+}
+
+static int clean_up_hci_state(struct hci_dev *hdev)
+{
+ struct hci_request req;
+ struct hci_conn *conn;
+
+ hci_req_init(&req, hdev);
+
+ if (test_bit(HCI_ISCAN, &hdev->flags) ||
+ test_bit(HCI_PSCAN, &hdev->flags)) {
+ u8 scan = 0x00;
+ hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+ }
+
+ if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+ disable_advertising(&req);
+
+ hci_stop_discovery(&req);
+
+ list_for_each_entry(conn, &hdev->conn_hash.list, list) {
+ struct hci_cp_disconnect dc;
+ struct hci_cp_reject_conn_req rej;
+
+ switch (conn->state) {
+ case BT_CONNECTED:
+ case BT_CONFIG:
+ dc.handle = cpu_to_le16(conn->handle);
+ dc.reason = 0x15; /* Terminated due to Power Off */
+ hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
+ break;
+ case BT_CONNECT:
+ if (conn->type == LE_LINK)
+ hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
+ 0, NULL);
+ else if (conn->type == ACL_LINK)
+ hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
+ 6, &conn->dst);
+ break;
+ case BT_CONNECT2:
+ bacpy(&rej.bdaddr, &conn->dst);
+ rej.reason = 0x15; /* Terminated due to Power Off */
+ if (conn->type == ACL_LINK)
+ hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
+ sizeof(rej), &rej);
+ else if (conn->type == SCO_LINK)
+ hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
+ sizeof(rej), &rej);
+ break;
+ }
+ }
+
+ return hci_req_run(&req, clean_up_hci_complete);
+}
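
The 0x15 reason used above is the spec-defined HCI error "Remote Device Terminated Connection due to Power Off"; ordinary mgmt-initiated disconnects elsewhere in this file use 0x13, "Remote User Terminated Connection". A trivial standalone sketch of the choice:

/* Userspace sketch: the two spec-defined disconnect reasons used in
 * this file.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

static uint8_t disconnect_reason(bool powering_off)
{
	return powering_off ? 0x15 : 0x13;
}

int main(void)
{
	printf("power off: 0x%02x, user: 0x%02x\n",
	       disconnect_reason(true), disconnect_reason(false));
	return 0;
}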
+
+static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_mode *cp = data;
+ struct pending_cmd *cmd;
+ int err;
+
+ BT_DBG("request for %s", hdev->name);
+
+ if (cp->val != 0x00 && cp->val != 0x01)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
+ MGMT_STATUS_BUSY);
+ goto failed;
+ }
+
+ if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
+ cancel_delayed_work(&hdev->power_off);
+
+ if (cp->val) {
+ mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
+ data, len);
+ err = mgmt_powered(hdev, 1);
+ goto failed;
+ }
+ }
+
+ if (!!cp->val == hdev_is_powered(hdev)) {
+ err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ if (cp->val) {
+ queue_work(hdev->req_workqueue, &hdev->power_on);
+ err = 0;
+ } else {
+ /* Disconnect connections, stop scans, etc */
+ err = clean_up_hci_state(hdev);
+ if (!err)
+ queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
+ HCI_POWER_OFF_TIMEOUT);
+
+ /* ENODATA means there were no HCI commands queued */
+ if (err == -ENODATA) {
+ cancel_delayed_work(&hdev->power_off);
+ queue_work(hdev->req_workqueue, &hdev->power_off.work);
+ err = 0;
+ }
+ }
+
+failed:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
+ struct sock *skip_sk)
+{
+ struct sk_buff *skb;
+ struct mgmt_hdr *hdr;
+
+ skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
if (!skb)
return -ENOMEM;
hdr = (void *) skb_put(skb, sizeof(*hdr));
- hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
- hdr->len = cpu_to_le16(sizeof(*ev) + sizeof(*rp));
+ hdr->opcode = cpu_to_le16(event);
+ if (hdev)
+ hdr->index = cpu_to_le16(hdev->id);
+ else
+ hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
+ hdr->len = cpu_to_le16(data_len);
- ev = (void *) skb_put(skb, sizeof(*ev));
- put_unaligned_le16(MGMT_OP_READ_INFO, &ev->opcode);
+ if (data)
+ memcpy(skb_put(skb, data_len), data, data_len);
- rp = (void *) skb_put(skb, sizeof(*rp));
+ /* Time stamp */
+ __net_timestamp(skb);
- cp = (void *) data;
- dev_id = get_unaligned_le16(&cp->index);
+ hci_send_to_control(skb, skip_sk);
+ kfree_skb(skb);
- BT_DBG("request for hci%u", dev_id);
+ return 0;
+}
- hdev = hci_dev_get(dev_id);
- if (!hdev) {
- kfree_skb(skb);
- return cmd_status(sk, MGMT_OP_READ_INFO, ENODEV);
+static int new_settings(struct hci_dev *hdev, struct sock *skip)
+{
+ __le32 ev;
+
+ ev = cpu_to_le32(get_current_settings(hdev));
+
+ return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
+}
+
+struct cmd_lookup {
+ struct sock *sk;
+ struct hci_dev *hdev;
+ u8 mgmt_status;
+};
+
+static void settings_rsp(struct pending_cmd *cmd, void *data)
+{
+ struct cmd_lookup *match = data;
+
+ send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
+
+ list_del(&cmd->list);
+
+ if (match->sk == NULL) {
+ match->sk = cmd->sk;
+ sock_hold(match->sk);
}
- hci_dev_lock_bh(hdev);
+ mgmt_pending_free(cmd);
+}
- put_unaligned_le16(hdev->id, &rp->index);
- rp->type = hdev->dev_type;
+static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
+{
+ u8 *status = data;
- rp->powered = test_bit(HCI_UP, &hdev->flags);
- rp->discoverable = test_bit(HCI_ISCAN, &hdev->flags);
- rp->pairable = test_bit(HCI_PSCAN, &hdev->flags);
+ cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
+ mgmt_pending_remove(cmd);
+}
- if (test_bit(HCI_AUTH, &hdev->flags))
- rp->sec_mode = 3;
- else if (hdev->ssp_mode > 0)
- rp->sec_mode = 4;
+static u8 mgmt_bredr_support(struct hci_dev *hdev)
+{
+ if (!lmp_bredr_capable(hdev))
+ return MGMT_STATUS_NOT_SUPPORTED;
+ else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+ return MGMT_STATUS_REJECTED;
+ else
+ return MGMT_STATUS_SUCCESS;
+}
+
+static u8 mgmt_le_support(struct hci_dev *hdev)
+{
+ if (!lmp_le_capable(hdev))
+ return MGMT_STATUS_NOT_SUPPORTED;
+ else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+ return MGMT_STATUS_REJECTED;
else
- rp->sec_mode = 2;
+ return MGMT_STATUS_SUCCESS;
+}
- bacpy(&rp->bdaddr, &hdev->bdaddr);
- memcpy(rp->features, hdev->features, 8);
- memcpy(rp->dev_class, hdev->dev_class, 3);
- put_unaligned_le16(hdev->manufacturer, &rp->manufacturer);
- rp->hci_ver = hdev->hci_ver;
- put_unaligned_le16(hdev->hci_rev, &rp->hci_rev);
+static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
+{
+ struct pending_cmd *cmd;
+ struct mgmt_mode *cp;
+ struct hci_request req;
+ bool changed;
- hci_dev_unlock_bh(hdev);
- hci_dev_put(hdev);
+ BT_DBG("status 0x%02x", status);
- if (sock_queue_rcv_skb(sk, skb) < 0)
- kfree_skb(skb);
+ hci_dev_lock(hdev);
+
+ cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
+ if (!cmd)
+ goto unlock;
+
+ if (status) {
+ u8 mgmt_err = mgmt_status(status);
+ cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
+ clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+ goto remove_cmd;
+ }
+
+ cp = cmd->param;
+ if (cp->val) {
+ changed = !test_and_set_bit(HCI_DISCOVERABLE,
+ &hdev->dev_flags);
+
+ if (hdev->discov_timeout > 0) {
+ int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
+ queue_delayed_work(hdev->workqueue, &hdev->discov_off,
+ to);
+ }
+ } else {
+ changed = test_and_clear_bit(HCI_DISCOVERABLE,
+ &hdev->dev_flags);
+ }
+
+ send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
+
+ if (changed)
+ new_settings(hdev, cmd->sk);
+
+ /* When the discoverable mode gets changed, make sure
+	 * that the class of device has the limited discoverable
+ * bit correctly set.
+ */
+ hci_req_init(&req, hdev);
+ update_class(&req);
+ hci_req_run(&req, NULL);
+
+remove_cmd:
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_set_discoverable *cp = data;
+ struct pending_cmd *cmd;
+ struct hci_request req;
+ u16 timeout;
+ u8 scan;
+ int err;
+
+ BT_DBG("request for %s", hdev->name);
+
+ if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
+ !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_REJECTED);
+
+ if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ timeout = __le16_to_cpu(cp->timeout);
+
+ /* Disabling discoverable requires that no timeout is set,
+ * and enabling limited discoverable requires a timeout.
+ */
+ if ((cp->val == 0x00 && timeout > 0) ||
+ (cp->val == 0x02 && timeout == 0))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ if (!hdev_is_powered(hdev) && timeout > 0) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_NOT_POWERED);
+ goto failed;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
+ mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_BUSY);
+ goto failed;
+ }
+
+ if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_REJECTED);
+ goto failed;
+ }
+
+ if (!hdev_is_powered(hdev)) {
+ bool changed = false;
+
+ /* Setting limited discoverable when powered off is
+		 * not a valid operation since it requires a timeout,
+		 * so there is no need to check HCI_LIMITED_DISCOVERABLE.
+ */
+ if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
+ change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+ changed = true;
+ }
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
+ if (err < 0)
+ goto failed;
+
+ if (changed)
+ err = new_settings(hdev, sk);
+
+ goto failed;
+ }
+
+ /* If the current mode is the same, then just update the timeout
+	 * value with the new value. If only the timeout gets updated,
+	 * no HCI transactions are needed.
+ */
+ if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
+ (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
+ &hdev->dev_flags)) {
+ cancel_delayed_work(&hdev->discov_off);
+ hdev->discov_timeout = timeout;
+
+ if (cp->val && hdev->discov_timeout > 0) {
+ int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
+ queue_delayed_work(hdev->workqueue, &hdev->discov_off,
+ to);
+ }
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+	/* Cancel any potential discoverable timeout that might
+	 * still be active and store the new timeout value. The
+	 * arming of the timeout happens in the complete handler.
+ */
+ cancel_delayed_work(&hdev->discov_off);
+ hdev->discov_timeout = timeout;
+
+ /* Limited discoverable mode */
+ if (cp->val == 0x02)
+ set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+ else
+ clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+
+ hci_req_init(&req, hdev);
+
+ /* The procedure for LE-only controllers is much simpler - just
+ * update the advertising data.
+ */
+ if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+ goto update_ad;
+
+ scan = SCAN_PAGE;
+
+ if (cp->val) {
+ struct hci_cp_write_current_iac_lap hci_cp;
+
+ if (cp->val == 0x02) {
+ /* Limited discoverable mode */
+ hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
+ hci_cp.iac_lap[0] = 0x00; /* LIAC */
+ hci_cp.iac_lap[1] = 0x8b;
+ hci_cp.iac_lap[2] = 0x9e;
+ hci_cp.iac_lap[3] = 0x33; /* GIAC */
+ hci_cp.iac_lap[4] = 0x8b;
+ hci_cp.iac_lap[5] = 0x9e;
+ } else {
+ /* General discoverable mode */
+ hci_cp.num_iac = 1;
+ hci_cp.iac_lap[0] = 0x33; /* GIAC */
+ hci_cp.iac_lap[1] = 0x8b;
+ hci_cp.iac_lap[2] = 0x9e;
+ }
+
+ hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
+ (hci_cp.num_iac * 3) + 1, &hci_cp);
+
+ scan |= SCAN_INQUIRY;
+ } else {
+ clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+ }
+
+ hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
+
+update_ad:
+ update_adv_data(&req);
+
+ err = hci_req_run(&req, set_discoverable_complete);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+failed:
+ hci_dev_unlock(hdev);
+ return err;
+}
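
The IAC bytes written above are 24-bit LAPs stored little-endian: LIAC 0x9e8b00 ("00 8b 9e") and GIAC 0x9e8b33 ("33 8b 9e"), per the Bluetooth Assigned Numbers. A standalone encoding sketch (userspace C):

/* Userspace sketch: encode the little-endian IAC LAPs used above. */
#include <stdint.h>
#include <stdio.h>

static void put_lap(uint8_t *buf, uint32_t lap)
{
	buf[0] = lap & 0xff;
	buf[1] = (lap >> 8) & 0xff;
	buf[2] = (lap >> 16) & 0xff;
}

int main(void)
{
	uint8_t iac_lap[6];
	int i;

	put_lap(iac_lap, 0x9e8b00);	/* LIAC first: limited mode */
	put_lap(iac_lap + 3, 0x9e8b33);	/* GIAC */

	for (i = 0; i < 6; i++)
		printf("%02x ", iac_lap[i]);
	printf("\n");
	return 0;
}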
+
+static void write_fast_connectable(struct hci_request *req, bool enable)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct hci_cp_write_page_scan_activity acp;
+ u8 type;
+
+ if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+ return;
+
+ if (hdev->hci_ver < BLUETOOTH_VER_1_2)
+ return;
+
+ if (enable) {
+ type = PAGE_SCAN_TYPE_INTERLACED;
+
+ /* 160 msec page scan interval */
+ acp.interval = cpu_to_le16(0x0100);
+ } else {
+ type = PAGE_SCAN_TYPE_STANDARD; /* default */
+
+ /* default 1.28 sec page scan */
+ acp.interval = cpu_to_le16(0x0800);
+ }
+
+ acp.window = cpu_to_le16(0x0012);
+
+ if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
+ __cpu_to_le16(hdev->page_scan_window) != acp.window)
+ hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
+ sizeof(acp), &acp);
+
+ if (hdev->page_scan_type != type)
+ hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
+}
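
The page scan activity values above are in 0.625 ms baseband slots: 0x0100 is the 160 ms fast-connectable interval, 0x0800 the 1.28 s default, and the 0x0012 window is 11.25 ms. A one-line conversion sketch (userspace C):

/* Userspace sketch: convert the 0.625 ms baseband-slot values used by
 * write_fast_connectable() into milliseconds.
 */
#include <stdio.h>

static double slots_to_ms(unsigned int slots)
{
	return slots * 0.625;
}

int main(void)
{
	printf("fast interval 0x0100 = %.2f ms\n", slots_to_ms(0x0100));
	printf("default interval 0x0800 = %.2f ms\n", slots_to_ms(0x0800));
	printf("window 0x0012 = %.2f ms\n", slots_to_ms(0x0012));
	return 0;
}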
+
+static void set_connectable_complete(struct hci_dev *hdev, u8 status)
+{
+ struct pending_cmd *cmd;
+ struct mgmt_mode *cp;
+ bool changed;
+
+ BT_DBG("status 0x%02x", status);
+
+ hci_dev_lock(hdev);
+
+ cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
+ if (!cmd)
+ goto unlock;
+
+ if (status) {
+ u8 mgmt_err = mgmt_status(status);
+ cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
+ goto remove_cmd;
+ }
+
+ cp = cmd->param;
+ if (cp->val)
+ changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+ else
+ changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+
+ send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
+
+ if (changed)
+ new_settings(hdev, cmd->sk);
+
+remove_cmd:
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static int set_connectable_update_settings(struct hci_dev *hdev,
+ struct sock *sk, u8 val)
+{
+ bool changed = false;
+ int err;
+
+ if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+ changed = true;
+
+ if (val) {
+ set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+ } else {
+ clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+ clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+ }
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
+ if (err < 0)
+ return err;
+
+ if (changed)
+ return new_settings(hdev, sk);
+
+ return 0;
+}
+
+static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_mode *cp = data;
+ struct pending_cmd *cmd;
+ struct hci_request req;
+ u8 scan;
+ int err;
+
+ BT_DBG("request for %s", hdev->name);
+
+ if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
+ !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
+ MGMT_STATUS_REJECTED);
+
+ if (cp->val != 0x00 && cp->val != 0x01)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ if (!hdev_is_powered(hdev)) {
+ err = set_connectable_update_settings(hdev, sk, cp->val);
+ goto failed;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
+ mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
+ MGMT_STATUS_BUSY);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ hci_req_init(&req, hdev);
+
+ /* If BR/EDR is not enabled and we disable advertising as a
+ * by-product of disabling connectable, we need to update the
+ * advertising flags.
+ */
+ if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+ if (!cp->val) {
+ clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+ clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+ }
+ update_adv_data(&req);
+ } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
+ if (cp->val) {
+ scan = SCAN_PAGE;
+ } else {
+ scan = 0;
+
+ if (test_bit(HCI_ISCAN, &hdev->flags) &&
+ hdev->discov_timeout > 0)
+ cancel_delayed_work(&hdev->discov_off);
+ }
+
+ hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+ }
+
+ /* If we're going from non-connectable to connectable or
+	 * vice-versa when fast connectable is enabled, ensure that fast
+ * connectable gets disabled. write_fast_connectable won't do
+ * anything if the page scan parameters are already what they
+ * should be.
+ */
+ if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
+ write_fast_connectable(&req, false);
+
+ if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
+ hci_conn_num(hdev, LE_LINK) == 0) {
+ disable_advertising(&req);
+ enable_advertising(&req);
+ }
+
+ err = hci_req_run(&req, set_connectable_complete);
+ if (err < 0) {
+ mgmt_pending_remove(cmd);
+ if (err == -ENODATA)
+ err = set_connectable_update_settings(hdev, sk,
+ cp->val);
+ goto failed;
+ }
+
+failed:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_mode *cp = data;
+ bool changed;
+ int err;
+
+ BT_DBG("request for %s", hdev->name);
+
+ if (cp->val != 0x00 && cp->val != 0x01)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ if (cp->val)
+ changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
+ else
+ changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
+ if (err < 0)
+ goto unlock;
+
+ if (changed)
+ err = new_settings(hdev, sk);
+
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_mode *cp = data;
+ struct pending_cmd *cmd;
+ u8 val, status;
+ int err;
+
+ BT_DBG("request for %s", hdev->name);
+
+ status = mgmt_bredr_support(hdev);
+ if (status)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
+ status);
+
+ if (cp->val != 0x00 && cp->val != 0x01)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ if (!hdev_is_powered(hdev)) {
+ bool changed = false;
+
+ if (!!cp->val != test_bit(HCI_LINK_SECURITY,
+ &hdev->dev_flags)) {
+ change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
+ changed = true;
+ }
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
+ if (err < 0)
+ goto failed;
+
+ if (changed)
+ err = new_settings(hdev, sk);
+
+ goto failed;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
+ MGMT_STATUS_BUSY);
+ goto failed;
+ }
+
+ val = !!cp->val;
+
+ if (test_bit(HCI_AUTH, &hdev->flags) == val) {
+ err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
+ if (err < 0) {
+ mgmt_pending_remove(cmd);
+ goto failed;
+ }
+
+failed:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
+{
+ struct mgmt_mode *cp = data;
+ struct pending_cmd *cmd;
+ u8 status;
+ int err;
+
+ BT_DBG("request for %s", hdev->name);
+
+ status = mgmt_bredr_support(hdev);
+ if (status)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
+
+ if (!lmp_ssp_capable(hdev))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ if (cp->val != 0x00 && cp->val != 0x01)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ if (!hdev_is_powered(hdev)) {
+ bool changed;
+
+ if (cp->val) {
+ changed = !test_and_set_bit(HCI_SSP_ENABLED,
+ &hdev->dev_flags);
+ } else {
+ changed = test_and_clear_bit(HCI_SSP_ENABLED,
+ &hdev->dev_flags);
+ if (!changed)
+ changed = test_and_clear_bit(HCI_HS_ENABLED,
+ &hdev->dev_flags);
+ else
+ clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+ }
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
+ if (err < 0)
+ goto failed;
+
+ if (changed)
+ err = new_settings(hdev, sk);
+
+ goto failed;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
+ mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
+ MGMT_STATUS_BUSY);
+ goto failed;
+ }
+
+ if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
+ err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
+ if (err < 0) {
+ mgmt_pending_remove(cmd);
+ goto failed;
+ }
+
+failed:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
+{
+ struct mgmt_mode *cp = data;
+ bool changed;
+ u8 status;
+ int err;
+
+ BT_DBG("request for %s", hdev->name);
+
+ status = mgmt_bredr_support(hdev);
+ if (status)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
+
+ if (!lmp_ssp_capable(hdev))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+ MGMT_STATUS_REJECTED);
+
+ if (cp->val != 0x00 && cp->val != 0x01)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ if (cp->val) {
+ changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+ } else {
+ if (hdev_is_powered(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+ MGMT_STATUS_REJECTED);
+ goto unlock;
+ }
+
+ changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+ }
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
+ if (err < 0)
+ goto unlock;
+
+ if (changed)
+ err = new_settings(hdev, sk);
+
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static void le_enable_complete(struct hci_dev *hdev, u8 status)
+{
+ struct cmd_lookup match = { NULL, hdev };
+
+ if (status) {
+ u8 mgmt_err = mgmt_status(status);
+
+ mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
+ &mgmt_err);
+ return;
+ }
+
+ mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
+
+ new_settings(hdev, match.sk);
+
+ if (match.sk)
+ sock_put(match.sk);
+
+ /* Make sure the controller has a good default for
+ * advertising data. Restrict the update to when LE
+ * has actually been enabled. During power on, the
+ * update in powered_update_hci will take care of it.
+ */
+ if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+ struct hci_request req;
+
+ hci_dev_lock(hdev);
+
+ hci_req_init(&req, hdev);
+ update_adv_data(&req);
+ update_scan_rsp_data(&req);
+ hci_req_run(&req, NULL);
+
+ hci_dev_unlock(hdev);
+ }
+}
+
+static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
+{
+ struct mgmt_mode *cp = data;
+ struct hci_cp_write_le_host_supported hci_cp;
+ struct pending_cmd *cmd;
+ struct hci_request req;
+ int err;
+ u8 val, enabled;
+
+ BT_DBG("request for %s", hdev->name);
+
+ if (!lmp_le_capable(hdev))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ if (cp->val != 0x00 && cp->val != 0x01)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ /* LE-only devices do not allow toggling LE on/off */
+ if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
+ MGMT_STATUS_REJECTED);
+
+ hci_dev_lock(hdev);
+
+ val = !!cp->val;
+ enabled = lmp_host_le_capable(hdev);
+
+ if (!hdev_is_powered(hdev) || val == enabled) {
+ bool changed = false;
+
+ if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+ change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
+ changed = true;
+ }
+
+ if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
+ clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+ changed = true;
+ }
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
+ if (err < 0)
+ goto unlock;
+
+ if (changed)
+ err = new_settings(hdev, sk);
+
+ goto unlock;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
+ mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
+ MGMT_STATUS_BUSY);
+ goto unlock;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ hci_req_init(&req, hdev);
+
+ memset(&hci_cp, 0, sizeof(hci_cp));
+
+ if (val) {
+ hci_cp.le = val;
+ hci_cp.simul = lmp_le_br_capable(hdev);
+ } else {
+ if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+ disable_advertising(&req);
+ }
+
+ hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
+ &hci_cp);
+
+ err = hci_req_run(&req, le_enable_complete);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+/* This is a helper function to test for pending mgmt commands that can
+ * cause CoD or EIR HCI commands. We can only allow one such pending
+ * mgmt command at a time since otherwise we cannot easily track what
+ * the current values are and will be, and based on that calculate
+ * whether a new HCI command needs to be sent and, if so, with what value.
+ */
+static bool pending_eir_or_class(struct hci_dev *hdev)
+{
+ struct pending_cmd *cmd;
+
+ list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
+ switch (cmd->opcode) {
+ case MGMT_OP_ADD_UUID:
+ case MGMT_OP_REMOVE_UUID:
+ case MGMT_OP_SET_DEV_CLASS:
+ case MGMT_OP_SET_POWERED:
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static const u8 bluetooth_base_uuid[] = {
+ 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static u8 get_uuid_size(const u8 *uuid)
+{
+ u32 val;
+
+ if (memcmp(uuid, bluetooth_base_uuid, 12))
+ return 128;
+
+ val = get_unaligned_le32(&uuid[12]);
+ if (val > 0xffff)
+ return 32;
+
+ return 16;
+}
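
get_uuid_size() relies on the kernel storing UUIDs little-endian: the low 96 bits of the Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) occupy the first 12 bytes, so a prefix match plus a le32 read of the last four bytes classifies the UUID as a 16-, 32- or 128-bit value. A standalone sketch of the same logic (userspace C):

/* Userspace sketch: a UUID is a 16- or 32-bit alias iff its low 96
 * bits match the Bluetooth Base UUID (stored little endian, as the
 * kernel does).
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static const uint8_t base_uuid[12] = {
	0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
	0x00, 0x10, 0x00, 0x00,
};

static unsigned int uuid_size(const uint8_t uuid[16])
{
	uint32_t val;

	if (memcmp(uuid, base_uuid, 12))
		return 128;

	/* explicit little-endian read of the alias */
	val = (uint32_t)uuid[12] | ((uint32_t)uuid[13] << 8) |
	      ((uint32_t)uuid[14] << 16) | ((uint32_t)uuid[15] << 24);
	if (val > 0xffff)
		return 32;

	return 16;
}

int main(void)
{
	uint8_t uuid[16] = { 0 };

	memcpy(uuid, base_uuid, 12);
	uuid[12] = 0x00;		/* 0x1200: PnP Information */
	uuid[13] = 0x12;

	printf("size: %u bits\n", uuid_size(uuid));
	return 0;
}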
+
+static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
+{
+ struct pending_cmd *cmd;
+
+ hci_dev_lock(hdev);
+
+ cmd = mgmt_pending_find(mgmt_op, hdev);
+ if (!cmd)
+ goto unlock;
+
+ cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
+ hdev->dev_class, 3);
+
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static void add_uuid_complete(struct hci_dev *hdev, u8 status)
+{
+ BT_DBG("status 0x%02x", status);
+
+ mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
+}
+
+static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
+{
+ struct mgmt_cp_add_uuid *cp = data;
+ struct pending_cmd *cmd;
+ struct hci_request req;
+ struct bt_uuid *uuid;
+ int err;
+
+ BT_DBG("request for %s", hdev->name);
+
+ hci_dev_lock(hdev);
+
+ if (pending_eir_or_class(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
+ MGMT_STATUS_BUSY);
+ goto failed;
+ }
+
+ uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
+ if (!uuid) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ memcpy(uuid->uuid, cp->uuid, 16);
+ uuid->svc_hint = cp->svc_hint;
+ uuid->size = get_uuid_size(cp->uuid);
+
+ list_add_tail(&uuid->list, &hdev->uuids);
+
+ hci_req_init(&req, hdev);
+
+ update_class(&req);
+ update_eir(&req);
+
+ err = hci_req_run(&req, add_uuid_complete);
+ if (err < 0) {
+ if (err != -ENODATA)
+ goto failed;
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
+ hdev->dev_class, 3);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ err = 0;
+
+failed:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static bool enable_service_cache(struct hci_dev *hdev)
+{
+ if (!hdev_is_powered(hdev))
+ return false;
+
+ if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
+ queue_delayed_work(hdev->workqueue, &hdev->service_cache,
+ CACHE_TIMEOUT);
+ return true;
+ }
+
+ return false;
+}
+
+static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
+{
+ BT_DBG("status 0x%02x", status);
+
+ mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
+}
+
+static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_remove_uuid *cp = data;
+ struct pending_cmd *cmd;
+ struct bt_uuid *match, *tmp;
+ u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+ struct hci_request req;
+ int err, found;
+
+ BT_DBG("request for %s", hdev->name);
+
+ hci_dev_lock(hdev);
+
+ if (pending_eir_or_class(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
+ MGMT_STATUS_BUSY);
+ goto unlock;
+ }
+
+ if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
+ hci_uuids_clear(hdev);
+
+ if (enable_service_cache(hdev)) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
+ 0, hdev->dev_class, 3);
+ goto unlock;
+ }
+
+ goto update_class;
+ }
+
+ found = 0;
+
+ list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
+ if (memcmp(match->uuid, cp->uuid, 16) != 0)
+ continue;
+
+ list_del(&match->list);
+ kfree(match);
+ found++;
+ }
+
+ if (found == 0) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
+ MGMT_STATUS_INVALID_PARAMS);
+ goto unlock;
+ }
+
+update_class:
+ hci_req_init(&req, hdev);
+
+ update_class(&req);
+ update_eir(&req);
+
+ err = hci_req_run(&req, remove_uuid_complete);
+ if (err < 0) {
+ if (err != -ENODATA)
+ goto unlock;
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
+ hdev->dev_class, 3);
+ goto unlock;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ err = 0;
+
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static void set_class_complete(struct hci_dev *hdev, u8 status)
+{
+ BT_DBG("status 0x%02x", status);
+
+ mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
+}
+
+static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_set_dev_class *cp = data;
+ struct pending_cmd *cmd;
+ struct hci_request req;
+ int err;
+
+ BT_DBG("request for %s", hdev->name);
+
+ if (!lmp_bredr_capable(hdev))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ hci_dev_lock(hdev);
+
+ if (pending_eir_or_class(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
+ MGMT_STATUS_BUSY);
+ goto unlock;
+ }
+
+ if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
+ MGMT_STATUS_INVALID_PARAMS);
+ goto unlock;
+ }
+
+ hdev->major_class = cp->major;
+ hdev->minor_class = cp->minor;
+
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
+ hdev->dev_class, 3);
+ goto unlock;
+ }
+
+ hci_req_init(&req, hdev);
+
+ if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
+ hci_dev_unlock(hdev);
+ cancel_delayed_work_sync(&hdev->service_cache);
+ hci_dev_lock(hdev);
+ update_eir(&req);
+ }
+
+ update_class(&req);
+
+ err = hci_req_run(&req, set_class_complete);
+ if (err < 0) {
+ if (err != -ENODATA)
+ goto unlock;
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
+ hdev->dev_class, 3);
+ goto unlock;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ err = 0;
+
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_load_link_keys *cp = data;
+ u16 key_count, expected_len;
+ bool changed;
+ int i;
+
+ BT_DBG("request for %s", hdev->name);
+
+ if (!lmp_bredr_capable(hdev))
+ return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ key_count = __le16_to_cpu(cp->key_count);
+
+ expected_len = sizeof(*cp) + key_count *
+ sizeof(struct mgmt_link_key_info);
+ if (expected_len != len) {
+ BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
+ expected_len, len);
+ return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
+ }
+
+ if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
+ return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
+ key_count);
+
+ for (i = 0; i < key_count; i++) {
+ struct mgmt_link_key_info *key = &cp->keys[i];
+
+ if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
+ return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
+ }
+
+ hci_dev_lock(hdev);
+
+ hci_link_keys_clear(hdev);
+
+ if (cp->debug_keys)
+ changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
+ else
+ changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
+
+ if (changed)
+ new_settings(hdev, NULL);
+
+ for (i = 0; i < key_count; i++) {
+ struct mgmt_link_key_info *key = &cp->keys[i];
+
+ hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
+ key->type, key->pin_len);
+ }
+
+ cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
+
+ hci_dev_unlock(hdev);
return 0;
}
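
load_link_keys() above shows the usual pattern for variable-length mgmt commands: recompute the expected size from the count field and reject both short and oversized payloads before touching any record. A standalone sketch under assumed, illustrative field names (the kernel's structs are packed; this one is not):

/* Userspace sketch: a header with a count followed by a flexible
 * array of fixed-size records, validated the way load_link_keys()
 * validates its payload.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <stdbool.h>

struct key_info {		/* illustrative, not the kernel layout */
	uint8_t addr[7];
	uint8_t type;
	uint8_t val[16];
	uint8_t pin_len;
};

struct load_keys_cmd {
	uint8_t debug_keys;
	uint16_t key_count;
	struct key_info keys[];
};

static bool valid_len(const struct load_keys_cmd *cp, size_t len)
{
	size_t expected = sizeof(*cp) +
			  cp->key_count * sizeof(struct key_info);

	return expected == len;	/* reject short *and* oversized payloads */
}

int main(void)
{
	size_t two_keys = sizeof(struct load_keys_cmd) +
			  2 * sizeof(struct key_info);
	struct load_keys_cmd cmd = { .key_count = 2 };

	printf("2 keys -> expect %zu bytes, check: %d\n",
	       two_keys, valid_len(&cmd, two_keys));
	return 0;
}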
+static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type, struct sock *skip_sk)
+{
+ struct mgmt_ev_device_unpaired ev;
+
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = addr_type;
+
+ return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
+ skip_sk);
+}
+
+static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_unpair_device *cp = data;
+ struct mgmt_rp_unpair_device rp;
+ struct hci_cp_disconnect dc;
+ struct pending_cmd *cmd;
+ struct hci_conn *conn;
+ int err;
+
+ memset(&rp, 0, sizeof(rp));
+ bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
+ rp.addr.type = cp->addr.type;
+
+ if (!bdaddr_type_is_valid(cp->addr.type))
+ return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &rp, sizeof(rp));
+
+ if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
+ return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &rp, sizeof(rp));
+
+ hci_dev_lock(hdev);
+
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
+ MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
+ goto unlock;
+ }
+
+ if (cp->addr.type == BDADDR_BREDR) {
+ err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
+ } else {
+ u8 addr_type;
+
+ if (cp->addr.type == BDADDR_LE_PUBLIC)
+ addr_type = ADDR_LE_DEV_PUBLIC;
+ else
+ addr_type = ADDR_LE_DEV_RANDOM;
+
+ hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
+
+ hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
+
+ err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
+ }
+
+ if (err < 0) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
+ MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
+ goto unlock;
+ }
+
+ if (cp->disconnect) {
+ if (cp->addr.type == BDADDR_BREDR)
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
+ &cp->addr.bdaddr);
+ else
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
+ &cp->addr.bdaddr);
+ } else {
+ conn = NULL;
+ }
+
+ if (!conn) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
+ &rp, sizeof(rp));
+ device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
+ goto unlock;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
+ sizeof(*cp));
+ if (!cmd) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ dc.handle = cpu_to_le16(conn->handle);
+ dc.reason = 0x13; /* Remote User Terminated Connection */
+ err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_disconnect *cp = data;
+ struct mgmt_rp_disconnect rp;
+ struct hci_cp_disconnect dc;
+ struct pending_cmd *cmd;
+ struct hci_conn *conn;
+ int err;
+
+ BT_DBG("");
+
+ memset(&rp, 0, sizeof(rp));
+ bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
+ rp.addr.type = cp->addr.type;
+
+ if (!bdaddr_type_is_valid(cp->addr.type))
+ return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
+ MGMT_STATUS_INVALID_PARAMS,
+ &rp, sizeof(rp));
+
+ hci_dev_lock(hdev);
+
+ if (!test_bit(HCI_UP, &hdev->flags)) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
+ MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
+ goto failed;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
+ MGMT_STATUS_BUSY, &rp, sizeof(rp));
+ goto failed;
+ }
+
+ if (cp->addr.type == BDADDR_BREDR)
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
+ &cp->addr.bdaddr);
+ else
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
+
+ if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
+ MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ dc.handle = cpu_to_le16(conn->handle);
+ dc.reason = HCI_ERROR_REMOTE_USER_TERM;
+
+ err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+failed:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
+{
+ switch (link_type) {
+ case LE_LINK:
+ switch (addr_type) {
+ case ADDR_LE_DEV_PUBLIC:
+ return BDADDR_LE_PUBLIC;
+
+ default:
+ /* Fallback to LE Random address type */
+ return BDADDR_LE_RANDOM;
+ }
+
+ default:
+ /* Fallback to BR/EDR type */
+ return BDADDR_BREDR;
+ }
+}
+
+static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 data_len)
+{
+ struct mgmt_rp_get_connections *rp;
+ struct hci_conn *c;
+ size_t rp_len;
+ int err;
+ u16 i;
+
+ BT_DBG("");
+
+ hci_dev_lock(hdev);
+
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
+ MGMT_STATUS_NOT_POWERED);
+ goto unlock;
+ }
+
+ i = 0;
+ list_for_each_entry(c, &hdev->conn_hash.list, list) {
+ if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
+ i++;
+ }
+
+ rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
+ rp = kmalloc(rp_len, GFP_KERNEL);
+ if (!rp) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ i = 0;
+ list_for_each_entry(c, &hdev->conn_hash.list, list) {
+ if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
+ continue;
+ bacpy(&rp->addr[i].bdaddr, &c->dst);
+ rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
+ if (c->type == SCO_LINK || c->type == ESCO_LINK)
+ continue;
+ i++;
+ }
+
+ rp->conn_count = cpu_to_le16(i);
+
+ /* Recalculate length in case of filtered SCO connections, etc */
+ rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
+ rp_len);
+
+ kfree(rp);
+
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
+ struct mgmt_cp_pin_code_neg_reply *cp)
+{
+ struct pending_cmd *cmd;
+ int err;
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
+ sizeof(*cp));
+ if (!cmd)
+ return -ENOMEM;
+
+ err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
+ sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+ return err;
+}
+
+static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct hci_conn *conn;
+ struct mgmt_cp_pin_code_reply *cp = data;
+ struct hci_cp_pin_code_reply reply;
+ struct pending_cmd *cmd;
+ int err;
+
+ BT_DBG("");
+
+ hci_dev_lock(hdev);
+
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
+ MGMT_STATUS_NOT_POWERED);
+ goto failed;
+ }
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
+ if (!conn) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
+ MGMT_STATUS_NOT_CONNECTED);
+ goto failed;
+ }
+
+ if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
+ struct mgmt_cp_pin_code_neg_reply ncp;
+
+ memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
+
+ BT_ERR("PIN code is not 16 bytes long");
+
+ err = send_pin_code_neg_reply(sk, hdev, &ncp);
+ if (err >= 0)
+ err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ bacpy(&reply.bdaddr, &cp->addr.bdaddr);
+ reply.pin_len = cp->pin_len;
+ memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
+
+ err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+failed:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_set_io_capability *cp = data;
+
+ BT_DBG("");
+
+ hci_dev_lock(hdev);
+
+ hdev->io_capability = cp->io_capability;
+
+ BT_DBG("%s IO capability set to 0x%02x", hdev->name,
+ hdev->io_capability);
+
+ hci_dev_unlock(hdev);
+
+ return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
+ 0);
+}
+
+static struct pending_cmd *find_pairing(struct hci_conn *conn)
+{
+ struct hci_dev *hdev = conn->hdev;
+ struct pending_cmd *cmd;
+
+ list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
+ if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
+ continue;
+
+ if (cmd->user_data != conn)
+ continue;
+
+ return cmd;
+ }
+
+ return NULL;
+}
+
+static void pairing_complete(struct pending_cmd *cmd, u8 status)
+{
+ struct mgmt_rp_pair_device rp;
+ struct hci_conn *conn = cmd->user_data;
+
+ bacpy(&rp.addr.bdaddr, &conn->dst);
+ rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
+
+ cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
+ &rp, sizeof(rp));
+
+ /* So we don't get further callbacks for this connection */
+ conn->connect_cfm_cb = NULL;
+ conn->security_cfm_cb = NULL;
+ conn->disconn_cfm_cb = NULL;
+
+ hci_conn_drop(conn);
+
+ mgmt_pending_remove(cmd);
+}
+
+void mgmt_smp_complete(struct hci_conn *conn, bool complete)
+{
+ u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
+ struct pending_cmd *cmd;
+
+ cmd = find_pairing(conn);
+ if (cmd)
+ pairing_complete(cmd, status);
+}
+
+static void pairing_complete_cb(struct hci_conn *conn, u8 status)
+{
+ struct pending_cmd *cmd;
+
+ BT_DBG("status %u", status);
+
+ cmd = find_pairing(conn);
+ if (!cmd)
+ BT_DBG("Unable to find a pending command");
+ else
+ pairing_complete(cmd, mgmt_status(status));
+}
+
+static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
+{
+ struct pending_cmd *cmd;
+
+ BT_DBG("status %u", status);
+
+ if (!status)
+ return;
+
+ cmd = find_pairing(conn);
+ if (!cmd)
+ BT_DBG("Unable to find a pending command");
+ else
+ pairing_complete(cmd, mgmt_status(status));
+}
+
+static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_pair_device *cp = data;
+ struct mgmt_rp_pair_device rp;
+ struct pending_cmd *cmd;
+ u8 sec_level, auth_type;
+ struct hci_conn *conn;
+ int err;
+
+ BT_DBG("");
+
+ memset(&rp, 0, sizeof(rp));
+ bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
+ rp.addr.type = cp->addr.type;
+
+ if (!bdaddr_type_is_valid(cp->addr.type))
+ return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &rp, sizeof(rp));
+
+ hci_dev_lock(hdev);
+
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
+ MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
+ goto unlock;
+ }
+
+ sec_level = BT_SECURITY_MEDIUM;
+ auth_type = HCI_AT_DEDICATED_BONDING;
+
+ if (cp->addr.type == BDADDR_BREDR) {
+ conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
+ auth_type);
+ } else {
+ u8 addr_type;
+
+ /* Convert from L2CAP channel address type to HCI address type
+ */
+ if (cp->addr.type == BDADDR_LE_PUBLIC)
+ addr_type = ADDR_LE_DEV_PUBLIC;
+ else
+ addr_type = ADDR_LE_DEV_RANDOM;
+
+ conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
+ sec_level, auth_type);
+ }
+
+ if (IS_ERR(conn)) {
+ int status;
+
+ if (PTR_ERR(conn) == -EBUSY)
+ status = MGMT_STATUS_BUSY;
+ else
+ status = MGMT_STATUS_CONNECT_FAILED;
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
+ status, &rp,
+ sizeof(rp));
+ goto unlock;
+ }
+
+ if (conn->connect_cfm_cb) {
+ hci_conn_drop(conn);
+ err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
+ MGMT_STATUS_BUSY, &rp, sizeof(rp));
+ goto unlock;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ hci_conn_drop(conn);
+ goto unlock;
+ }
+
+ /* For LE, just connecting isn't a proof that the pairing finished */
+ if (cp->addr.type == BDADDR_BREDR) {
+ conn->connect_cfm_cb = pairing_complete_cb;
+ conn->security_cfm_cb = pairing_complete_cb;
+ conn->disconn_cfm_cb = pairing_complete_cb;
+ } else {
+ conn->connect_cfm_cb = le_pairing_complete_cb;
+ conn->security_cfm_cb = le_pairing_complete_cb;
+ conn->disconn_cfm_cb = le_pairing_complete_cb;
+ }
+
+ conn->io_capability = cp->io_cap;
+ cmd->user_data = conn;
+
+ if (conn->state == BT_CONNECTED &&
+ hci_conn_security(conn, sec_level, auth_type))
+ pairing_complete(cmd, 0);
+
+ err = 0;
+
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_addr_info *addr = data;
+ struct pending_cmd *cmd;
+ struct hci_conn *conn;
+ int err;
+
+ BT_DBG("");
+
+ hci_dev_lock(hdev);
+
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
+ MGMT_STATUS_NOT_POWERED);
+ goto unlock;
+ }
+
+ cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
+ if (!cmd) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS);
+ goto unlock;
+ }
+
+ conn = cmd->user_data;
+
+ if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS);
+ goto unlock;
+ }
+
+ pairing_complete(cmd, MGMT_STATUS_CANCELLED);
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
+ addr, sizeof(*addr));
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
+ struct mgmt_addr_info *addr, u16 mgmt_op,
+ u16 hci_op, __le32 passkey)
+{
+ struct pending_cmd *cmd;
+ struct hci_conn *conn;
+ int err;
+
+ hci_dev_lock(hdev);
+
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_complete(sk, hdev->id, mgmt_op,
+ MGMT_STATUS_NOT_POWERED, addr,
+ sizeof(*addr));
+ goto done;
+ }
+
+ if (addr->type == BDADDR_BREDR)
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
+ else
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
+
+ if (!conn) {
+ err = cmd_complete(sk, hdev->id, mgmt_op,
+ MGMT_STATUS_NOT_CONNECTED, addr,
+ sizeof(*addr));
+ goto done;
+ }
+
+ if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
+ /* Continue with pairing via SMP. The hdev lock must be
+ * released as SMP may try to re-acquire it for crypto
+ * purposes.
+ */
+ hci_dev_unlock(hdev);
+ err = smp_user_confirm_reply(conn, mgmt_op, passkey);
+ hci_dev_lock(hdev);
+
+ if (!err)
+ err = cmd_complete(sk, hdev->id, mgmt_op,
+ MGMT_STATUS_SUCCESS, addr,
+ sizeof(*addr));
+ else
+ err = cmd_complete(sk, hdev->id, mgmt_op,
+ MGMT_STATUS_FAILED, addr,
+ sizeof(*addr));
+
+ goto done;
+ }
+
+ cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
+ if (!cmd) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ /* Continue with pairing via HCI */
+ if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
+ struct hci_cp_user_passkey_reply cp;
+
+ bacpy(&cp.bdaddr, &addr->bdaddr);
+ cp.passkey = passkey;
+ err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
+ } else
+ err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
+ &addr->bdaddr);
+
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+done:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_cp_pin_code_neg_reply *cp = data;
+
+ BT_DBG("");
+
+ return user_pairing_resp(sk, hdev, &cp->addr,
+ MGMT_OP_PIN_CODE_NEG_REPLY,
+ HCI_OP_PIN_CODE_NEG_REPLY, 0);
+}
+
+static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_user_confirm_reply *cp = data;
+
+ BT_DBG("");
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ return user_pairing_resp(sk, hdev, &cp->addr,
+ MGMT_OP_USER_CONFIRM_REPLY,
+ HCI_OP_USER_CONFIRM_REPLY, 0);
+}
+
+static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_cp_user_confirm_neg_reply *cp = data;
+
+ BT_DBG("");
+
+ return user_pairing_resp(sk, hdev, &cp->addr,
+ MGMT_OP_USER_CONFIRM_NEG_REPLY,
+ HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
+}
+
+static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_user_passkey_reply *cp = data;
+
+ BT_DBG("");
+
+ return user_pairing_resp(sk, hdev, &cp->addr,
+ MGMT_OP_USER_PASSKEY_REPLY,
+ HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
+}
+
+static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_cp_user_passkey_neg_reply *cp = data;
+
+ BT_DBG("");
+
+ return user_pairing_resp(sk, hdev, &cp->addr,
+ MGMT_OP_USER_PASSKEY_NEG_REPLY,
+ HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
+}
+
+static void update_name(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct hci_cp_write_local_name cp;
+
+ memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
+
+ hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
+}
+
+static void set_name_complete(struct hci_dev *hdev, u8 status)
+{
+ struct mgmt_cp_set_local_name *cp;
+ struct pending_cmd *cmd;
+
+ BT_DBG("status 0x%02x", status);
+
+ hci_dev_lock(hdev);
+
+ cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
+ if (!cmd)
+ goto unlock;
+
+ cp = cmd->param;
+
+ if (status)
+ cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
+ mgmt_status(status));
+ else
+ cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
+ cp, sizeof(*cp));
+
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_set_local_name *cp = data;
+ struct pending_cmd *cmd;
+ struct hci_request req;
+ int err;
+
+ BT_DBG("");
+
+ hci_dev_lock(hdev);
+
+ /* If the old values are the same as the new ones, just return
+ * a direct command complete event.
+ */
+ if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
+ !memcmp(hdev->short_name, cp->short_name,
+ sizeof(hdev->short_name))) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
+ data, len);
+ goto failed;
+ }
+
+ memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
+
+ if (!hdev_is_powered(hdev)) {
+ memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
+ data, len);
+ if (err < 0)
+ goto failed;
+
+ err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
+ sk);
+
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
+
+ hci_req_init(&req, hdev);
+
+ if (lmp_bredr_capable(hdev)) {
+ update_name(&req);
+ update_eir(&req);
+ }
+
+ /* The name is stored in the scan response data and so there
+ * is no need to update the advertising data here.
+ */
+ if (lmp_le_capable(hdev))
+ update_scan_rsp_data(&req);
+
+ err = hci_req_run(&req, set_name_complete);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+failed:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 data_len)
+{
+ struct pending_cmd *cmd;
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ hci_dev_lock(hdev);
+
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+ MGMT_STATUS_NOT_POWERED);
+ goto unlock;
+ }
+
+ if (!lmp_ssp_capable(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+ MGMT_STATUS_NOT_SUPPORTED);
+ goto unlock;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+ MGMT_STATUS_BUSY);
+ goto unlock;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
+ err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
+ 0, NULL);
+ else
+ err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
+
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ int err;
+
+ BT_DBG("%s ", hdev->name);
+
+ hci_dev_lock(hdev);
+
+ if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
+ struct mgmt_cp_add_remote_oob_data *cp = data;
+ u8 status;
+
+ err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
+ cp->hash, cp->randomizer);
+ if (err < 0)
+ status = MGMT_STATUS_FAILED;
+ else
+ status = MGMT_STATUS_SUCCESS;
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
+ status, &cp->addr, sizeof(cp->addr));
+ } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
+ struct mgmt_cp_add_remote_oob_ext_data *cp = data;
+ u8 status;
+
+ err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
+ cp->hash192,
+ cp->randomizer192,
+ cp->hash256,
+ cp->randomizer256);
+ if (err < 0)
+ status = MGMT_STATUS_FAILED;
+ else
+ status = MGMT_STATUS_SUCCESS;
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
+ status, &cp->addr, sizeof(cp->addr));
+ } else {
+ BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
+ err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
+ MGMT_STATUS_INVALID_PARAMS);
+ }
+
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_cp_remove_remote_oob_data *cp = data;
+ u8 status;
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ hci_dev_lock(hdev);
+
+ err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
+ if (err < 0)
+ status = MGMT_STATUS_INVALID_PARAMS;
+ else
+ status = MGMT_STATUS_SUCCESS;
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
+ status, &cp->addr, sizeof(cp->addr));
+
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
+{
+ struct pending_cmd *cmd;
+ u8 type;
+ int err;
+
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+
+ cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
+ if (!cmd)
+ return -ENOENT;
+
+ type = hdev->discovery.type;
+
+ err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
+ &type, sizeof(type));
+ mgmt_pending_remove(cmd);
+
+ return err;
+}
+
+static void start_discovery_complete(struct hci_dev *hdev, u8 status)
+{
+ unsigned long timeout = 0;
+
+ BT_DBG("status %d", status);
+
+ if (status) {
+ hci_dev_lock(hdev);
+ mgmt_start_discovery_failed(hdev, status);
+ hci_dev_unlock(hdev);
+ return;
+ }
+
+ hci_dev_lock(hdev);
+ hci_discovery_set_state(hdev, DISCOVERY_FINDING);
+ hci_dev_unlock(hdev);
+
+ switch (hdev->discovery.type) {
+ case DISCOV_TYPE_LE:
+ timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
+ break;
+
+ case DISCOV_TYPE_INTERLEAVED:
+ timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
+ break;
+
+ case DISCOV_TYPE_BREDR:
+ break;
+
+ default:
+ BT_ERR("Invalid discovery type %d", hdev->discovery.type);
+ }
+
+ if (!timeout)
+ return;
+
+ queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
+}
+
+static int start_discovery(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_cp_start_discovery *cp = data;
+ struct pending_cmd *cmd;
+ struct hci_cp_le_set_scan_param param_cp;
+ struct hci_cp_le_set_scan_enable enable_cp;
+ struct hci_cp_inquiry inq_cp;
+ struct hci_request req;
+ /* General inquiry access code (GIAC) */
+ u8 lap[3] = { 0x33, 0x8b, 0x9e };
+ u8 status, own_addr_type;
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ hci_dev_lock(hdev);
+
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_NOT_POWERED);
+ goto failed;
+ }
+
+ if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_BUSY);
+ goto failed;
+ }
+
+ if (hdev->discovery.state != DISCOVERY_STOPPED) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_BUSY);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ hdev->discovery.type = cp->type;
+
+ hci_req_init(&req, hdev);
+
+ switch (hdev->discovery.type) {
+ case DISCOV_TYPE_BREDR:
+ status = mgmt_bredr_support(hdev);
+ if (status) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ status);
+ mgmt_pending_remove(cmd);
+ goto failed;
+ }
+
+ if (test_bit(HCI_INQUIRY, &hdev->flags)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_BUSY);
+ mgmt_pending_remove(cmd);
+ goto failed;
+ }
+
+ hci_inquiry_cache_flush(hdev);
+
+ memset(&inq_cp, 0, sizeof(inq_cp));
+ memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
+ inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
+ hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
+ break;
+
+ case DISCOV_TYPE_LE:
+ case DISCOV_TYPE_INTERLEAVED:
+ status = mgmt_le_support(hdev);
+ if (status) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ status);
+ mgmt_pending_remove(cmd);
+ goto failed;
+ }
+
+ if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
+ !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_NOT_SUPPORTED);
+ mgmt_pending_remove(cmd);
+ goto failed;
+ }
+
+ if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_REJECTED);
+ mgmt_pending_remove(cmd);
+ goto failed;
+ }
+
+ /* If the controller is scanning, the background scan is
+ * running. Thus, we should temporarily stop it in order to set
+ * the discovery scanning parameters.
+ */
+ if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+ hci_req_add_le_scan_disable(&req);
+
+ memset(&param_cp, 0, sizeof(param_cp));
+
+ /* All active scans will be done with either a resolvable
+ * private address (when the privacy feature has been enabled)
+ * or an unresolvable private address.
+ */
+ err = hci_update_random_address(&req, true, &own_addr_type);
+ if (err < 0) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_FAILED);
+ mgmt_pending_remove(cmd);
+ goto failed;
+ }
+
+ param_cp.type = LE_SCAN_ACTIVE;
+ param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
+ param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
+ param_cp.own_address_type = own_addr_type;
+ hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
+ &param_cp);
+
+ memset(&enable_cp, 0, sizeof(enable_cp));
+ enable_cp.enable = LE_SCAN_ENABLE;
+ enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
+ hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
+ &enable_cp);
+ break;
+
+ default:
+ err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_INVALID_PARAMS);
+ mgmt_pending_remove(cmd);
+ goto failed;
+ }
+
+ err = hci_req_run(&req, start_discovery_complete);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+ else
+ hci_discovery_set_state(hdev, DISCOVERY_STARTING);
+
+failed:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
+{
+ struct pending_cmd *cmd;
+ int err;
+
+ cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
+ if (!cmd)
+ return -ENOENT;
+
+ err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
+ &hdev->discovery.type, sizeof(hdev->discovery.type));
+ mgmt_pending_remove(cmd);
+
+ return err;
+}
+
+static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
+{
+ BT_DBG("status %d", status);
+
+ hci_dev_lock(hdev);
+
+ if (status) {
+ mgmt_stop_discovery_failed(hdev, status);
+ goto unlock;
+ }
+
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_stop_discovery *mgmt_cp = data;
+ struct pending_cmd *cmd;
+ struct hci_request req;
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ hci_dev_lock(hdev);
+
+ if (!hci_discovery_active(hdev)) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
+ MGMT_STATUS_REJECTED, &mgmt_cp->type,
+ sizeof(mgmt_cp->type));
+ goto unlock;
+ }
+
+ if (hdev->discovery.type != mgmt_cp->type) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
+ MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
+ sizeof(mgmt_cp->type));
+ goto unlock;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ hci_req_init(&req, hdev);
+
+ hci_stop_discovery(&req);
+
+ err = hci_req_run(&req, stop_discovery_complete);
+ if (!err) {
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
+ goto unlock;
+ }
+
+ mgmt_pending_remove(cmd);
+
+ /* If no HCI commands were sent we're done */
+ if (err == -ENODATA) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
+ &mgmt_cp->type, sizeof(mgmt_cp->type));
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ }
+
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_confirm_name *cp = data;
+ struct inquiry_entry *e;
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ hci_dev_lock(hdev);
+
+ if (!hci_discovery_active(hdev)) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
+ MGMT_STATUS_FAILED, &cp->addr,
+ sizeof(cp->addr));
+ goto failed;
+ }
+
+ e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
+ if (!e) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
+ MGMT_STATUS_INVALID_PARAMS, &cp->addr,
+ sizeof(cp->addr));
+ goto failed;
+ }
+
+ if (cp->name_known) {
+ e->name_state = NAME_KNOWN;
+ list_del(&e->list);
+ } else {
+ e->name_state = NAME_NEEDED;
+ hci_inquiry_cache_update_resolve(hdev, e);
+ }
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
+ sizeof(cp->addr));
+
+failed:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_block_device *cp = data;
+ u8 status;
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ if (!bdaddr_type_is_valid(cp->addr.type))
+ return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &cp->addr, sizeof(cp->addr));
+
+ hci_dev_lock(hdev);
+
+ err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
+ if (err < 0)
+ status = MGMT_STATUS_FAILED;
+ else
+ status = MGMT_STATUS_SUCCESS;
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
+ &cp->addr, sizeof(cp->addr));
+
+ hci_dev_unlock(hdev);
+
+ return err;
+}
+
+static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_unblock_device *cp = data;
+ u8 status;
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ if (!bdaddr_type_is_valid(cp->addr.type))
+ return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &cp->addr, sizeof(cp->addr));
+
+ hci_dev_lock(hdev);
+
+ err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
+ if (err < 0)
+ status = MGMT_STATUS_INVALID_PARAMS;
+ else
+ status = MGMT_STATUS_SUCCESS;
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
+ &cp->addr, sizeof(cp->addr));
+
+ hci_dev_unlock(hdev);
+
+ return err;
+}
+
+static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_set_device_id *cp = data;
+ struct hci_request req;
+ int err;
+ __u16 source;
+
+ BT_DBG("%s", hdev->name);
+
+ source = __le16_to_cpu(cp->source);
+
+ if (source > 0x0002)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ hdev->devid_source = source;
+ hdev->devid_vendor = __le16_to_cpu(cp->vendor);
+ hdev->devid_product = __le16_to_cpu(cp->product);
+ hdev->devid_version = __le16_to_cpu(cp->version);
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
+
+ hci_req_init(&req, hdev);
+ update_eir(&req);
+ hci_req_run(&req, NULL);
+
+ hci_dev_unlock(hdev);
+
+ return err;
+}
+
+static void set_advertising_complete(struct hci_dev *hdev, u8 status)
+{
+ struct cmd_lookup match = { NULL, hdev };
+
+ if (status) {
+ u8 mgmt_err = mgmt_status(status);
+
+ mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
+ cmd_status_rsp, &mgmt_err);
+ return;
+ }
+
+ mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
+ &match);
+
+ new_settings(hdev, match.sk);
+
+ if (match.sk)
+ sock_put(match.sk);
+}
+
+static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_mode *cp = data;
+ struct pending_cmd *cmd;
+ struct hci_request req;
+ u8 val, enabled, status;
+ int err;
+
+ BT_DBG("request for %s", hdev->name);
+
+ status = mgmt_le_support(hdev);
+ if (status)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
+ status);
+
+ if (cp->val != 0x00 && cp->val != 0x01)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ val = !!cp->val;
+ enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
+
+ /* The following conditions mean that we should not do any HCI
+ * communication but instead directly send a mgmt response to
+ * user space (after toggling the flag if necessary).
+ */
+ if (!hdev_is_powered(hdev) || val == enabled ||
+ hci_conn_num(hdev, LE_LINK) > 0) {
+ bool changed = false;
+
+ if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
+ change_bit(HCI_ADVERTISING, &hdev->dev_flags);
+ changed = true;
+ }
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
+ if (err < 0)
+ goto unlock;
+
+ if (changed)
+ err = new_settings(hdev, sk);
+
+ goto unlock;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
+ mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
+ MGMT_STATUS_BUSY);
+ goto unlock;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ hci_req_init(&req, hdev);
+
+ if (val)
+ enable_advertising(&req);
+ else
+ disable_advertising(&req);
+
+ err = hci_req_run(&req, set_advertising_complete);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int set_static_address(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_cp_set_static_address *cp = data;
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ if (!lmp_le_capable(hdev))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ if (hdev_is_powered(hdev))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
+ MGMT_STATUS_REJECTED);
+
+ if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
+ if (!bacmp(&cp->bdaddr, BDADDR_NONE))
+ return cmd_status(sk, hdev->id,
+ MGMT_OP_SET_STATIC_ADDRESS,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ /* Two most significant bits shall be set */
+ if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
+ return cmd_status(sk, hdev->id,
+ MGMT_OP_SET_STATIC_ADDRESS,
+ MGMT_STATUS_INVALID_PARAMS);
+ }
+
+ hci_dev_lock(hdev);
+
+ bacpy(&hdev->static_addr, &cp->bdaddr);
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
+
+ hci_dev_unlock(hdev);
+
+ return err;
+}
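+
+/* Illustrative sketch, not part of this patch: the address rule checked
+ * above follows the Core spec - a static random address must have its
+ * two most significant bits set. A hypothetical predicate:
+ */
+static inline bool bdaddr_is_static_random(const bdaddr_t *bdaddr)
+{
+ /* b[5] is the most significant byte of the address */
+ return (bdaddr->b[5] & 0xc0) == 0xc0;
+}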
+
+static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_cp_set_scan_params *cp = data;
+ __u16 interval, window;
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ if (!lmp_le_capable(hdev))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ interval = __le16_to_cpu(cp->interval);
+
+ if (interval < 0x0004 || interval > 0x4000)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ window = __le16_to_cpu(cp->window);
+
+ if (window < 0x0004 || window > 0x4000)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ if (window > interval)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ hdev->le_scan_interval = interval;
+ hdev->le_scan_window = window;
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
+
+ /* If background scan is running, restart it so new parameters are
+ * loaded.
+ */
+ if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
+ hdev->discovery.state == DISCOVERY_STOPPED) {
+ struct hci_request req;
+
+ hci_req_init(&req, hdev);
+
+ hci_req_add_le_scan_disable(&req);
+ hci_req_add_le_passive_scan(&req);
+
+ hci_req_run(&req, NULL);
+ }
+
+ hci_dev_unlock(hdev);
+
+ return err;
+}
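+
+/* Illustrative sketch, not part of this patch: interval and window are
+ * expressed in units of 0.625 ms, so the valid 0x0004-0x4000 range
+ * spans 2.5 ms to 10.24 s. A hypothetical conversion helper:
+ */
+static inline u16 le_scan_ms_to_units(u16 ms)
+{
+ /* 1 unit = 0.625 ms, i.e. 8 units per 5 ms; 10 ms -> 0x0010 */
+ return ms * 8 / 5;
+}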
+
+static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
+{
+ struct pending_cmd *cmd;
+
+ BT_DBG("status 0x%02x", status);
+
+ hci_dev_lock(hdev);
+
+ cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
+ if (!cmd)
+ goto unlock;
+
+ if (status) {
+ cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+ mgmt_status(status));
+ } else {
+ struct mgmt_mode *cp = cmd->param;
+
+ if (cp->val)
+ set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
+ else
+ clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
+
+ send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
+ new_settings(hdev, cmd->sk);
+ }
+
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_mode *cp = data;
+ struct pending_cmd *cmd;
+ struct hci_request req;
+ int err;
+
+ BT_DBG("%s", hdev->name);
+
+ if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
+ hdev->hci_ver < BLUETOOTH_VER_1_2)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ if (cp->val != 0x00 && cp->val != 0x01)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ if (!hdev_is_powered(hdev))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+ MGMT_STATUS_NOT_POWERED);
+
+ if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+ MGMT_STATUS_REJECTED);
+
+ hci_dev_lock(hdev);
+
+ if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+ MGMT_STATUS_BUSY);
+ goto unlock;
+ }
+
+ if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
+ err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
+ hdev);
+ goto unlock;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
+ data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ hci_req_init(&req, hdev);
+
+ write_fast_connectable(&req, cp->val);
+
+ err = hci_req_run(&req, fast_connectable_complete);
+ if (err < 0) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+ MGMT_STATUS_FAILED);
+ mgmt_pending_remove(cmd);
+ }
+
+unlock:
+ hci_dev_unlock(hdev);
+
+ return err;
+}
+
+static void set_bredr_scan(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ u8 scan = 0;
+
+ /* Ensure that fast connectable is disabled. This function will
+ * not do anything if the page scan parameters are already what
+ * they should be.
+ */
+ write_fast_connectable(req, false);
+
+ if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+ scan |= SCAN_PAGE;
+ if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
+ scan |= SCAN_INQUIRY;
+
+ if (scan)
+ hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+}
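+
+/* For reference: SCAN_INQUIRY (0x01) enables inquiry scan and SCAN_PAGE
+ * (0x02) enables page scan, matching the Write Scan Enable parameter
+ * values defined in the HCI specification.
+ */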
+
+static void set_bredr_complete(struct hci_dev *hdev, u8 status)
+{
+ struct pending_cmd *cmd;
+
+ BT_DBG("status 0x%02x", status);
+
+ hci_dev_lock(hdev);
+
+ cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
+ if (!cmd)
+ goto unlock;
+
+ if (status) {
+ u8 mgmt_err = mgmt_status(status);
+
+ /* We need to restore the flag if related HCI commands
+ * failed.
+ */
+ clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+
+ cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
+ } else {
+ send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
+ new_settings(hdev, cmd->sk);
+ }
+
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
+{
+ struct mgmt_mode *cp = data;
+ struct pending_cmd *cmd;
+ struct hci_request req;
+ int err;
+
+ BT_DBG("request for %s", hdev->name);
+
+ if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+ MGMT_STATUS_REJECTED);
+
+ if (cp->val != 0x00 && cp->val != 0x01)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+ err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
+ goto unlock;
+ }
+
+ if (!hdev_is_powered(hdev)) {
+ if (!cp->val) {
+ clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+ clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
+ clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
+ clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
+ clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+ }
+
+ change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
+ if (err < 0)
+ goto unlock;
+
+ err = new_settings(hdev, sk);
+ goto unlock;
+ }
+
+ /* Reject disabling when powered on */
+ if (!cp->val) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+ MGMT_STATUS_REJECTED);
+ goto unlock;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+ MGMT_STATUS_BUSY);
+ goto unlock;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ /* We need to flip the bit already here so that update_adv_data
+ * generates the correct flags.
+ */
+ set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+
+ hci_req_init(&req, hdev);
+
+ if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+ set_bredr_scan(&req);
+
+ /* Since only the advertising data flags will change, there
+ * is no need to update the scan response data.
+ */
+ update_adv_data(&req);
+
+ err = hci_req_run(&req, set_bredr_complete);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_mode *cp = data;
+ struct pending_cmd *cmd;
+ u8 val, status;
+ int err;
+
+ BT_DBG("request for %s", hdev->name);
+
+ status = mgmt_bredr_support(hdev);
+ if (status)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
+ status);
+
+ if (!lmp_sc_capable(hdev) &&
+ !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ if (!hdev_is_powered(hdev)) {
+ bool changed;
+
+ if (cp->val) {
+ changed = !test_and_set_bit(HCI_SC_ENABLED,
+ &hdev->dev_flags);
+ if (cp->val == 0x02)
+ set_bit(HCI_SC_ONLY, &hdev->dev_flags);
+ else
+ clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
+ } else {
+ changed = test_and_clear_bit(HCI_SC_ENABLED,
+ &hdev->dev_flags);
+ clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
+ }
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
+ if (err < 0)
+ goto failed;
+
+ if (changed)
+ err = new_settings(hdev, sk);
+
+ goto failed;
+ }
+
+ if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
+ MGMT_STATUS_BUSY);
+ goto failed;
+ }
+
+ val = !!cp->val;
+
+ if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
+ (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
+ err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
+ if (err < 0) {
+ mgmt_pending_remove(cmd);
+ goto failed;
+ }
+
+ if (cp->val == 0x02)
+ set_bit(HCI_SC_ONLY, &hdev->dev_flags);
+ else
+ clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
+
+failed:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_mode *cp = data;
+ bool changed;
+ int err;
+
+ BT_DBG("request for %s", hdev->name);
+
+ if (cp->val != 0x00 && cp->val != 0x01)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ if (cp->val)
+ changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
+ else
+ changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
+ if (err < 0)
+ goto unlock;
+
+ if (changed)
+ err = new_settings(hdev, sk);
+
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
+ u16 len)
+{
+ struct mgmt_cp_set_privacy *cp = cp_data;
+ bool changed;
+ int err;
+
+ BT_DBG("request for %s", hdev->name);
+
+ if (!lmp_le_capable(hdev))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ if (cp->privacy != 0x00 && cp->privacy != 0x01)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ if (hdev_is_powered(hdev))
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
+ MGMT_STATUS_REJECTED);
+
+ hci_dev_lock(hdev);
+
+ /* If user space supports this command it is also expected to
+ * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
+ */
+ set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
+
+ if (cp->privacy) {
+ changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
+ memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
+ set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
+ } else {
+ changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
+ memset(hdev->irk, 0, sizeof(hdev->irk));
+ clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
+ }
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
+ if (err < 0)
+ goto unlock;
+
+ if (changed)
+ err = new_settings(hdev, sk);
+
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static bool irk_is_valid(struct mgmt_irk_info *irk)
+{
+ switch (irk->addr.type) {
+ case BDADDR_LE_PUBLIC:
+ return true;
+
+ case BDADDR_LE_RANDOM:
+ /* Two most significant bits shall be set */
+ if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
+ return false;
+ return true;
+ }
+
+ return false;
+}
+
+static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
+ u16 len)
+{
+ struct mgmt_cp_load_irks *cp = cp_data;
+ u16 irk_count, expected_len;
+ int i, err;
+
+ BT_DBG("request for %s", hdev->name);
+
+ if (!lmp_le_capable(hdev))
+ return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ irk_count = __le16_to_cpu(cp->irk_count);
+
+ expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
+ if (expected_len != len) {
+ BT_ERR("load_irks: expected %u bytes, got %u bytes",
+ expected_len, len);
+ return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
+ MGMT_STATUS_INVALID_PARAMS);
+ }
+
+ BT_DBG("%s irk_count %u", hdev->name, irk_count);
+
+ for (i = 0; i < irk_count; i++) {
+ struct mgmt_irk_info *key = &cp->irks[i];
+
+ if (!irk_is_valid(key))
+ return cmd_status(sk, hdev->id,
+ MGMT_OP_LOAD_IRKS,
+ MGMT_STATUS_INVALID_PARAMS);
+ }
+
+ hci_dev_lock(hdev);
+
+ hci_smp_irks_clear(hdev);
+
+ for (i = 0; i < irk_count; i++) {
+ struct mgmt_irk_info *irk = &cp->irks[i];
+ u8 addr_type;
+
+ if (irk->addr.type == BDADDR_LE_PUBLIC)
+ addr_type = ADDR_LE_DEV_PUBLIC;
+ else
+ addr_type = ADDR_LE_DEV_RANDOM;
+
+ hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
+ BDADDR_ANY);
+ }
+
+ set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
+
+ hci_dev_unlock(hdev);
+
+ return err;
+}
+
+static bool ltk_is_valid(struct mgmt_ltk_info *key)
+{
+ if (key->master != 0x00 && key->master != 0x01)
+ return false;
+
+ switch (key->addr.type) {
+ case BDADDR_LE_PUBLIC:
+ return true;
+
+ case BDADDR_LE_RANDOM:
+ /* Two most significant bits shall be set */
+ if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
+ return false;
+ return true;
+ }
+
+ return false;
+}
+
+static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
+ void *cp_data, u16 len)
+{
+ struct mgmt_cp_load_long_term_keys *cp = cp_data;
+ u16 key_count, expected_len;
+ int i, err;
+
+ BT_DBG("request for %s", hdev->name);
+
+ if (!lmp_le_capable(hdev))
+ return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ key_count = __le16_to_cpu(cp->key_count);
+
+ expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_ltk_info);
+ if (expected_len != len) {
+ BT_ERR("load_keys: expected %u bytes, got %u bytes",
+ expected_len, len);
+ return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
+ }
+
+ BT_DBG("%s key_count %u", hdev->name, key_count);
+
+ for (i = 0; i < key_count; i++) {
+ struct mgmt_ltk_info *key = &cp->keys[i];
+
+ if (!ltk_is_valid(key))
+ return cmd_status(sk, hdev->id,
+ MGMT_OP_LOAD_LONG_TERM_KEYS,
+ MGMT_STATUS_INVALID_PARAMS);
+ }
+
+ hci_dev_lock(hdev);
+
+ hci_smp_ltks_clear(hdev);
+
+ for (i = 0; i < key_count; i++) {
+ struct mgmt_ltk_info *key = &cp->keys[i];
+ u8 type, addr_type, authenticated;
+
+ if (key->addr.type == BDADDR_LE_PUBLIC)
+ addr_type = ADDR_LE_DEV_PUBLIC;
+ else
+ addr_type = ADDR_LE_DEV_RANDOM;
+
+ if (key->master)
+ type = HCI_SMP_LTK;
+ else
+ type = HCI_SMP_LTK_SLAVE;
+
+ switch (key->type) {
+ case MGMT_LTK_UNAUTHENTICATED:
+ authenticated = 0x00;
+ break;
+ case MGMT_LTK_AUTHENTICATED:
+ authenticated = 0x01;
+ break;
+ default:
+ continue;
+ }
+
+ hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
+ authenticated, key->val, key->enc_size, key->ediv,
+ key->rand);
+ }
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
+ NULL, 0);
+
+ hci_dev_unlock(hdev);
+
+ return err;
+}
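+
+/* Illustrative sketch, not part of this patch: Load IRKs and Load Long
+ * Term Keys validate their payloads the same way - a fixed header
+ * followed by a counted array of fixed-size entries. A hypothetical
+ * generic form of that check:
+ */
+static bool mgmt_var_len_ok(size_t hdr_len, size_t entry_len,
+ u16 entry_count, u16 len)
+{
+ return len == hdr_len + (size_t) entry_count * entry_len;
+}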
+
+struct cmd_conn_lookup {
+ struct hci_conn *conn;
+ bool valid_tx_power;
+ u8 mgmt_status;
+};
+
+static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
+{
+ struct cmd_conn_lookup *match = data;
+ struct mgmt_cp_get_conn_info *cp;
+ struct mgmt_rp_get_conn_info rp;
+ struct hci_conn *conn = cmd->user_data;
+
+ if (conn != match->conn)
+ return;
+
+ cp = (struct mgmt_cp_get_conn_info *) cmd->param;
+
+ memset(&rp, 0, sizeof(rp));
+ bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
+ rp.addr.type = cp->addr.type;
+
+ if (!match->mgmt_status) {
+ rp.rssi = conn->rssi;
+
+ if (match->valid_tx_power) {
+ rp.tx_power = conn->tx_power;
+ rp.max_tx_power = conn->max_tx_power;
+ } else {
+ rp.tx_power = HCI_TX_POWER_INVALID;
+ rp.max_tx_power = HCI_TX_POWER_INVALID;
+ }
+ }
+
+ cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
+ match->mgmt_status, &rp, sizeof(rp));
+
+ hci_conn_drop(conn);
+
+ mgmt_pending_remove(cmd);
+}
+
+static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
+{
+ struct hci_cp_read_rssi *cp;
+ struct hci_conn *conn;
+ struct cmd_conn_lookup match;
+ u16 handle;
+
+ BT_DBG("status 0x%02x", status);
+
+ hci_dev_lock(hdev);
+
+ /* TX power data is valid if the request completed successfully,
+ * otherwise we assume it's not valid. At the moment we assume that
+ * either both or none of the current and max values are valid, to
+ * keep the code simple.
+ */
+ match.valid_tx_power = !status;
+
+ /* Commands sent in the request are either Read RSSI or Read Transmit
+ * Power Level, so we check which one was sent last to retrieve the
+ * connection handle. Both commands have the handle as their first
+ * parameter so it's safe to cast the data to the same command struct.
+ *
+ * The first command sent is always Read RSSI and we fail only if it
+ * fails. Otherwise we simply override the error to indicate success,
+ * as we have already remembered whether the TX power value is valid.
+ */
+ cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
+ if (!cp) {
+ cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
+ status = 0;
+ }
+
+ if (!cp) {
+ BT_ERR("invalid sent_cmd in response");
+ goto unlock;
+ }
+
+ handle = __le16_to_cpu(cp->handle);
+ conn = hci_conn_hash_lookup_handle(hdev, handle);
+ if (!conn) {
+ BT_ERR("unknown handle (%d) in response", handle);
+ goto unlock;
+ }
+
+ match.conn = conn;
+ match.mgmt_status = mgmt_status(status);
+
+ /* Cache refresh is complete, now reply to the mgmt request for
+ * the given connection only.
+ */
+ mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
+ get_conn_info_complete, &match);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
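+
+/* For reference: the cast in conn_info_refresh_complete() is safe
+ * because both command structs start with the connection handle:
+ *
+ * struct hci_cp_read_rssi { __le16 handle; } __packed;
+ * struct hci_cp_read_tx_power { __le16 handle; __u8 type; } __packed;
+ */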
+
+static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_get_conn_info *cp = data;
+ struct mgmt_rp_get_conn_info rp;
+ struct hci_conn *conn;
+ unsigned long conn_info_age;
+ int err = 0;
+
+ BT_DBG("%s", hdev->name);
+
+ memset(&rp, 0, sizeof(rp));
+ bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
+ rp.addr.type = cp->addr.type;
+
+ if (!bdaddr_type_is_valid(cp->addr.type))
+ return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
+ MGMT_STATUS_INVALID_PARAMS,
+ &rp, sizeof(rp));
+
+ hci_dev_lock(hdev);
+
+ if (!hdev_is_powered(hdev)) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
+ MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
+ goto unlock;
+ }
+
+ if (cp->addr.type == BDADDR_BREDR)
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
+ &cp->addr.bdaddr);
+ else
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
+
+ if (!conn || conn->state != BT_CONNECTED) {
+ err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
+ MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
+ goto unlock;
+ }
+
+ /* To avoid a client trying to guess when to poll again, we
+ * calculate the conn info age as a random value between the
+ * min/max set in hdev.
+ */
+ conn_info_age = hdev->conn_info_min_age +
+ prandom_u32_max(hdev->conn_info_max_age -
+ hdev->conn_info_min_age);
+
+ /* Query controller to refresh cached values if they are too old or were
+ * never read.
+ */
+ if (time_after(jiffies, conn->conn_info_timestamp +
+ msecs_to_jiffies(conn_info_age)) ||
+ !conn->conn_info_timestamp) {
+ struct hci_request req;
+ struct hci_cp_read_tx_power req_txp_cp;
+ struct hci_cp_read_rssi req_rssi_cp;
+ struct pending_cmd *cmd;
+
+ hci_req_init(&req, hdev);
+ req_rssi_cp.handle = cpu_to_le16(conn->handle);
+ hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
+ &req_rssi_cp);
+
+ /* For LE links TX power does not change thus we don't need to
+ * query for it once value is known.
+ */
+ if (!bdaddr_type_is_le(cp->addr.type) ||
+ conn->tx_power == HCI_TX_POWER_INVALID) {
+ req_txp_cp.handle = cpu_to_le16(conn->handle);
+ req_txp_cp.type = 0x00;
+ hci_req_add(&req, HCI_OP_READ_TX_POWER,
+ sizeof(req_txp_cp), &req_txp_cp);
+ }
+
+ /* Max TX power needs to be read only once per connection */
+ if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
+ req_txp_cp.handle = cpu_to_le16(conn->handle);
+ req_txp_cp.type = 0x01;
+ hci_req_add(&req, HCI_OP_READ_TX_POWER,
+ sizeof(req_txp_cp), &req_txp_cp);
+ }
+
+ err = hci_req_run(&req, conn_info_refresh_complete);
+ if (err < 0)
+ goto unlock;
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
+ data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ hci_conn_hold(conn);
+ cmd->user_data = conn;
+
+ conn->conn_info_timestamp = jiffies;
+ } else {
+ /* Cache is valid, just reply with values cached in hci_conn */
+ rp.rssi = conn->rssi;
+ rp.tx_power = conn->tx_power;
+ rp.max_tx_power = conn->max_tx_power;
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
+ MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
+ }
+
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
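+
+/* Illustrative sketch, not part of this patch: the randomized cache age
+ * above keeps clients from timing their polls to the refresh boundary.
+ * A hypothetical predicate for the refresh decision, given an age
+ * picked from the [conn_info_min_age, conn_info_max_age] window:
+ */
+static bool conn_info_needs_refresh(struct hci_conn *conn,
+ unsigned long age_ms)
+{
+ return !conn->conn_info_timestamp ||
+ time_after(jiffies, conn->conn_info_timestamp +
+ msecs_to_jiffies(age_ms));
+}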
+
+static const struct mgmt_handler {
+ int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 data_len);
+ bool var_len;
+ size_t data_len;
+} mgmt_handlers[] = {
+ { NULL }, /* 0x0000 (no command) */
+ { read_version, false, MGMT_READ_VERSION_SIZE },
+ { read_commands, false, MGMT_READ_COMMANDS_SIZE },
+ { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
+ { read_controller_info, false, MGMT_READ_INFO_SIZE },
+ { set_powered, false, MGMT_SETTING_SIZE },
+ { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
+ { set_connectable, false, MGMT_SETTING_SIZE },
+ { set_fast_connectable, false, MGMT_SETTING_SIZE },
+ { set_pairable, false, MGMT_SETTING_SIZE },
+ { set_link_security, false, MGMT_SETTING_SIZE },
+ { set_ssp, false, MGMT_SETTING_SIZE },
+ { set_hs, false, MGMT_SETTING_SIZE },
+ { set_le, false, MGMT_SETTING_SIZE },
+ { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
+ { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
+ { add_uuid, false, MGMT_ADD_UUID_SIZE },
+ { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
+ { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
+ { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
+ { disconnect, false, MGMT_DISCONNECT_SIZE },
+ { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
+ { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
+ { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
+ { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
+ { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
+ { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
+ { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
+ { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
+ { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
+ { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
+ { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
+ { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
+ { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
+ { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
+ { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
+ { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
+ { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
+ { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
+ { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
+ { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
+ { set_advertising, false, MGMT_SETTING_SIZE },
+ { set_bredr, false, MGMT_SETTING_SIZE },
+ { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
+ { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
+ { set_secure_conn, false, MGMT_SETTING_SIZE },
+ { set_debug_keys, false, MGMT_SETTING_SIZE },
+ { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
+ { load_irks, true, MGMT_LOAD_IRKS_SIZE },
+ { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
+};
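+
+/* Illustrative sketch, not part of this patch: on the wire a mgmt
+ * command is a little-endian struct mgmt_hdr followed by the payload
+ * sized per the table above. A hypothetical builder for Set Powered
+ * (whose payload is a one byte struct mgmt_mode):
+ */
+static size_t build_set_powered(u8 *buf, u16 index, u8 on)
+{
+ struct mgmt_hdr *hdr = (void *) buf;
+
+ hdr->opcode = cpu_to_le16(MGMT_OP_SET_POWERED);
+ hdr->index = cpu_to_le16(index);
+ hdr->len = cpu_to_le16(1);
+ buf[sizeof(*hdr)] = on;
+
+ return sizeof(*hdr) + 1;
+}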
+
+
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
- unsigned char *buf;
+ void *buf;
+ u8 *cp;
struct mgmt_hdr *hdr;
- u16 opcode, len;
+ u16 opcode, index, len;
+ struct hci_dev *hdev = NULL;
+ const struct mgmt_handler *handler;
int err;
BT_DBG("got %zu bytes", msglen);
@@ -222,7 +4864,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
if (msglen < sizeof(*hdr))
return -EINVAL;
- buf = kmalloc(msglen, GFP_ATOMIC);
+ buf = kmalloc(msglen, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -231,78 +4873,1203 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
goto done;
}
- hdr = (struct mgmt_hdr *) buf;
- opcode = get_unaligned_le16(&hdr->opcode);
- len = get_unaligned_le16(&hdr->len);
+ hdr = buf;
+ opcode = __le16_to_cpu(hdr->opcode);
+ index = __le16_to_cpu(hdr->index);
+ len = __le16_to_cpu(hdr->len);
if (len != msglen - sizeof(*hdr)) {
err = -EINVAL;
goto done;
}
- switch (opcode) {
- case MGMT_OP_READ_VERSION:
- err = read_version(sk);
- break;
- case MGMT_OP_READ_INDEX_LIST:
- err = read_index_list(sk);
- break;
- case MGMT_OP_READ_INFO:
- err = read_controller_info(sk, buf + sizeof(*hdr), len);
- break;
- default:
+ if (index != MGMT_INDEX_NONE) {
+ hdev = hci_dev_get(index);
+ if (!hdev) {
+ err = cmd_status(sk, index, opcode,
+ MGMT_STATUS_INVALID_INDEX);
+ goto done;
+ }
+
+ if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
+ test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+ err = cmd_status(sk, index, opcode,
+ MGMT_STATUS_INVALID_INDEX);
+ goto done;
+ }
+ }
+
+ if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
+ mgmt_handlers[opcode].func == NULL) {
BT_DBG("Unknown op %u", opcode);
- err = cmd_status(sk, opcode, 0x01);
- break;
+ err = cmd_status(sk, index, opcode,
+ MGMT_STATUS_UNKNOWN_COMMAND);
+ goto done;
+ }
+
+ if ((hdev && opcode < MGMT_OP_READ_INFO) ||
+ (!hdev && opcode >= MGMT_OP_READ_INFO)) {
+ err = cmd_status(sk, index, opcode,
+ MGMT_STATUS_INVALID_INDEX);
+ goto done;
}
+ handler = &mgmt_handlers[opcode];
+
+ if ((handler->var_len && len < handler->data_len) ||
+ (!handler->var_len && len != handler->data_len)) {
+ err = cmd_status(sk, index, opcode,
+ MGMT_STATUS_INVALID_PARAMS);
+ goto done;
+ }
+
+ if (hdev)
+ mgmt_init_hdev(sk, hdev);
+
+ cp = buf + sizeof(*hdr);
+
+ err = handler->func(sk, hdev, cp, len);
if (err < 0)
goto done;
err = msglen;
done:
+ if (hdev)
+ hci_dev_put(hdev);
+
kfree(buf);
return err;
}
-static int mgmt_event(u16 event, void *data, u16 data_len)
+void mgmt_index_added(struct hci_dev *hdev)
{
- struct sk_buff *skb;
- struct mgmt_hdr *hdr;
+ if (hdev->dev_type != HCI_BREDR)
+ return;
- skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC);
- if (!skb)
- return -ENOMEM;
+ mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
+}
- bt_cb(skb)->channel = HCI_CHANNEL_CONTROL;
+void mgmt_index_removed(struct hci_dev *hdev)
+{
+ u8 status = MGMT_STATUS_INVALID_INDEX;
- hdr = (void *) skb_put(skb, sizeof(*hdr));
- hdr->opcode = cpu_to_le16(event);
- hdr->len = cpu_to_le16(data_len);
+ if (hdev->dev_type != HCI_BREDR)
+ return;
- memcpy(skb_put(skb, data_len), data, data_len);
+ mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
- hci_send_to_sock(NULL, skb);
- kfree_skb(skb);
+ mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
+}
- return 0;
+/* This function requires the caller holds hdev->lock */
+static void restart_le_auto_conns(struct hci_dev *hdev)
+{
+ struct hci_conn_params *p;
+
+ list_for_each_entry(p, &hdev->le_conn_params, list) {
+ if (p->auto_connect == HCI_AUTO_CONN_ALWAYS)
+ hci_pend_le_conn_add(hdev, &p->addr, p->addr_type);
+ }
+}
+
+static void powered_complete(struct hci_dev *hdev, u8 status)
+{
+ struct cmd_lookup match = { NULL, hdev };
+
+ BT_DBG("status 0x%02x", status);
+
+ hci_dev_lock(hdev);
+
+ restart_le_auto_conns(hdev);
+
+ mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
+
+ new_settings(hdev, match.sk);
+
+ hci_dev_unlock(hdev);
+
+ if (match.sk)
+ sock_put(match.sk);
+}
+
+static int powered_update_hci(struct hci_dev *hdev)
+{
+ struct hci_request req;
+ u8 link_sec;
+
+ hci_req_init(&req, hdev);
+
+ if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
+ !lmp_host_ssp_capable(hdev)) {
+ u8 ssp = 1;
+
+ hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
+ }
+
+ if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
+ lmp_bredr_capable(hdev)) {
+ struct hci_cp_write_le_host_supported cp;
+
+ cp.le = 1;
+ cp.simul = lmp_le_br_capable(hdev);
+
+ /* Check first if we already have the right
+ * host state (host features set)
+ */
+ if (cp.le != lmp_host_le_capable(hdev) ||
+ cp.simul != lmp_host_le_br_capable(hdev))
+ hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
+ sizeof(cp), &cp);
+ }
+
+ if (lmp_le_capable(hdev)) {
+ /* Make sure the controller has a good default for
+ * advertising data. This also applies to the case
+ * where BR/EDR was toggled during the AUTO_OFF phase.
+ */
+ if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+ update_adv_data(&req);
+ update_scan_rsp_data(&req);
+ }
+
+ if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+ enable_advertising(&req);
+ }
+
+ link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
+ if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
+ hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
+ sizeof(link_sec), &link_sec);
+
+ if (lmp_bredr_capable(hdev)) {
+ if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+ set_bredr_scan(&req);
+ update_class(&req);
+ update_name(&req);
+ update_eir(&req);
+ }
+
+ return hci_req_run(&req, powered_complete);
+}
+
+int mgmt_powered(struct hci_dev *hdev, u8 powered)
+{
+ struct cmd_lookup match = { NULL, hdev };
+ u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
+ u8 zero_cod[] = { 0, 0, 0 };
+ int err;
+
+ if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+ return 0;
+
+ if (powered) {
+ if (powered_update_hci(hdev) == 0)
+ return 0;
+
+ mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
+ &match);
+ goto new_settings;
+ }
+
+ mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
+ mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
+
+ if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
+ mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
+ zero_cod, sizeof(zero_cod), NULL);
+
+new_settings:
+ err = new_settings(hdev, match.sk);
+
+ if (match.sk)
+ sock_put(match.sk);
+
+ return err;
+}
+
+void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
+{
+ struct pending_cmd *cmd;
+ u8 status;
+
+ cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
+ if (!cmd)
+ return;
+
+ if (err == -ERFKILL)
+ status = MGMT_STATUS_RFKILLED;
+ else
+ status = MGMT_STATUS_FAILED;
+
+ cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
+
+ mgmt_pending_remove(cmd);
}
-int mgmt_index_added(u16 index)
+void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
- struct mgmt_ev_index_added ev;
+ struct hci_request req;
+
+ hci_dev_lock(hdev);
- put_unaligned_le16(index, &ev.index);
+ /* When the discoverable timeout triggers, just make sure the
+ * limited discoverable flag is cleared. Even in the case of a
+ * timeout triggered from general discoverable, it is safe to
+ * unconditionally clear the flag.
+ */
+ clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+ clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
- return mgmt_event(MGMT_EV_INDEX_ADDED, &ev, sizeof(ev));
+ hci_req_init(&req, hdev);
+ if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+ u8 scan = SCAN_PAGE;
+ hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
+ sizeof(scan), &scan);
+ }
+ update_class(&req);
+ update_adv_data(&req);
+ hci_req_run(&req, NULL);
+
+ hdev->discov_timeout = 0;
+
+ new_settings(hdev, NULL);
+
+ hci_dev_unlock(hdev);
}
-int mgmt_index_removed(u16 index)
+void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
- struct mgmt_ev_index_added ev;
+ bool changed;
+
+ /* Nothing needed here if there's a pending command since that
+ * command's request completion callback takes care of everything
+ * necessary.
+ */
+ if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
+ return;
+
+ /* Powering off may clear the scan mode - don't let that interfere */
+ if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
+ return;
+
+ if (discoverable) {
+ changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+ } else {
+ clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+ changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+ }
- put_unaligned_le16(index, &ev.index);
+ if (changed) {
+ struct hci_request req;
- return mgmt_event(MGMT_EV_INDEX_REMOVED, &ev, sizeof(ev));
+ /* In case this change in discoverable was triggered by
+ * a disabling of connectable there could be a need to
+ * update the advertising flags.
+ */
+ hci_req_init(&req, hdev);
+ update_adv_data(&req);
+ hci_req_run(&req, NULL);
+
+ new_settings(hdev, NULL);
+ }
+}
+
+void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
+{
+ bool changed;
+
+ /* Nothing needed here if there's a pending command since that
+ * command's request completion callback takes care of everything
+ * necessary.
+ */
+ if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
+ return;
+
+ /* Powering off may clear the scan mode - don't let that interfere */
+ if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
+ return;
+
+ if (connectable)
+ changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+ else
+ changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+
+ if (changed)
+ new_settings(hdev, NULL);
+}
+
+void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
+{
+ /* Powering off may stop advertising - don't let that interfere */
+ if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
+ return;
+
+ if (advertising)
+ set_bit(HCI_ADVERTISING, &hdev->dev_flags);
+ else
+ clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+}
+
+void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
+{
+ u8 mgmt_err = mgmt_status(status);
+
+ if (scan & SCAN_PAGE)
+ mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
+ cmd_status_rsp, &mgmt_err);
+
+ if (scan & SCAN_INQUIRY)
+ mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
+ cmd_status_rsp, &mgmt_err);
+}
+
+void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+ bool persistent)
+{
+ struct mgmt_ev_new_link_key ev;
+
+ memset(&ev, 0, sizeof(ev));
+
+ ev.store_hint = persistent;
+ bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
+ ev.key.addr.type = BDADDR_BREDR;
+ ev.key.type = key->type;
+ memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
+ ev.key.pin_len = key->pin_len;
+
+ mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
+}
+
+static u8 mgmt_ltk_type(struct smp_ltk *ltk)
+{
+ if (ltk->authenticated)
+ return MGMT_LTK_AUTHENTICATED;
+
+ return MGMT_LTK_UNAUTHENTICATED;
+}
+
+void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
+{
+ struct mgmt_ev_new_long_term_key ev;
+
+ memset(&ev, 0, sizeof(ev));
+
+ /* Devices using resolvable or non-resolvable random addresses
+	 * without providing an identity resolving key don't require
+	 * long term keys to be stored. Their addresses will change
+	 * the next time around.
+ *
+ * Only when a remote device provides an identity address
+	 * should the long term key be stored. If the remote
+ * identity is known, the long term keys are internally
+ * mapped to the identity address. So allow static random
+ * and public addresses here.
+ */
+ if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
+ (key->bdaddr.b[5] & 0xc0) != 0xc0)
+ ev.store_hint = 0x00;
+ else
+ ev.store_hint = persistent;
+
+ bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
+ ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
+ ev.key.type = mgmt_ltk_type(key);
+ ev.key.enc_size = key->enc_size;
+ ev.key.ediv = key->ediv;
+ ev.key.rand = key->rand;
+
+ if (key->type == HCI_SMP_LTK)
+ ev.key.master = 1;
+
+ memcpy(ev.key.val, key->val, sizeof(key->val));
+
+ mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
+}
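
The store_hint decision above keys off the top two bits of the most significant
address byte (b[5], since bdaddr_t is stored little-endian): an LE random
address is static only when both bits are set (0b11), while resolvable private
addresses use 0b01 and non-resolvable ones 0b00. The same test recurs in
mgmt_new_csrk() below. A minimal sketch of that classification, with an
illustrative helper name that is not part of the kernel API:

	/* Sketch only: classify an LE random address by its two top
	 * bits, mirroring the (bdaddr.b[5] & 0xc0) checks above.
	 */
	enum le_random_kind {
		LE_RANDOM_NON_RESOLVABLE,	/* top bits 0b00 */
		LE_RANDOM_RESOLVABLE,		/* top bits 0b01 */
		LE_RANDOM_STATIC,		/* top bits 0b11 */
		LE_RANDOM_RESERVED,		/* top bits 0b10 */
	};

	static enum le_random_kind le_random_kind(const bdaddr_t *ba)
	{
		switch (ba->b[5] & 0xc0) {
		case 0x00:
			return LE_RANDOM_NON_RESOLVABLE;
		case 0x40:
			return LE_RANDOM_RESOLVABLE;
		case 0xc0:
			return LE_RANDOM_STATIC;
		default:
			return LE_RANDOM_RESERVED;
		}
	}
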
+
+void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
+{
+ struct mgmt_ev_new_irk ev;
+
+ memset(&ev, 0, sizeof(ev));
+
+ /* For identity resolving keys from devices that are already
+ * using a public address or static random address, do not
+ * ask for storing this key. The identity resolving key really
+	 * is only mandatory for devices using resolvable random
+ * addresses.
+ *
+ * Storing all identity resolving keys has the downside that
+	 * they will also be loaded on the next boot of the system. More
+	 * identity resolving keys mean more time during scanning is
+ * needed to actually resolve these addresses.
+ */
+ if (bacmp(&irk->rpa, BDADDR_ANY))
+ ev.store_hint = 0x01;
+ else
+ ev.store_hint = 0x00;
+
+ bacpy(&ev.rpa, &irk->rpa);
+ bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
+ ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
+ memcpy(ev.irk.val, irk->val, sizeof(irk->val));
+
+ mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
+}
+
+void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
+ bool persistent)
+{
+ struct mgmt_ev_new_csrk ev;
+
+ memset(&ev, 0, sizeof(ev));
+
+ /* Devices using resolvable or non-resolvable random addresses
+	 * without providing an identity resolving key don't require
+	 * signature resolving keys to be stored. Their addresses will
+	 * change the next time around.
+ *
+ * Only when a remote device provides an identity address
+	 * should the signature resolving key be stored. So allow
+ * static random and public addresses here.
+ */
+ if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
+ (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
+ ev.store_hint = 0x00;
+ else
+ ev.store_hint = persistent;
+
+ bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
+ ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
+ ev.key.master = csrk->master;
+ memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
+
+ mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
+}
+
+static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
+ u8 data_len)
+{
+ eir[eir_len++] = sizeof(type) + data_len;
+ eir[eir_len++] = type;
+ memcpy(&eir[eir_len], data, data_len);
+ eir_len += data_len;
+
+ return eir_len;
+}
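
eir_append_data() writes one length-type-value entry: a length octet covering
the type byte plus the payload (sizeof(type) is 1 for the u8 type argument),
the type octet itself, then the payload, and returns the new write offset. A
rough usage sketch with made-up values:

	/* Sketch: two appended entries lay out as
	 *   [0x04][EIR_NAME_COMPLETE]['b']['t']['0']
	 *   [0x04][EIR_CLASS_OF_DEV][cod0][cod1][cod2]
	 * All values here are illustrative.
	 */
	u8 eir[16];
	u8 name[3] = { 'b', 't', '0' };
	u8 cod[3] = { 0x0c, 0x02, 0x5a };
	u16 len = 0;

	len = eir_append_data(eir, len, EIR_NAME_COMPLETE, name, 3);
	len = eir_append_data(eir, len, EIR_CLASS_OF_DEV, cod, 3);
	/* len is now 2 * (1 + 1 + 3) = 10 */
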
+
+void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u32 flags, u8 *name, u8 name_len,
+ u8 *dev_class)
+{
+ char buf[512];
+ struct mgmt_ev_device_connected *ev = (void *) buf;
+ u16 eir_len = 0;
+
+ bacpy(&ev->addr.bdaddr, bdaddr);
+ ev->addr.type = link_to_bdaddr(link_type, addr_type);
+
+ ev->flags = __cpu_to_le32(flags);
+
+ if (name_len > 0)
+ eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
+ name, name_len);
+
+ if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
+ eir_len = eir_append_data(ev->eir, eir_len,
+ EIR_CLASS_OF_DEV, dev_class, 3);
+
+ ev->eir_len = cpu_to_le16(eir_len);
+
+ mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
+ sizeof(*ev) + eir_len, NULL);
+}
+
+static void disconnect_rsp(struct pending_cmd *cmd, void *data)
+{
+ struct mgmt_cp_disconnect *cp = cmd->param;
+ struct sock **sk = data;
+ struct mgmt_rp_disconnect rp;
+
+ bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
+ rp.addr.type = cp->addr.type;
+
+ cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
+ sizeof(rp));
+
+ *sk = cmd->sk;
+ sock_hold(*sk);
+
+ mgmt_pending_remove(cmd);
+}
+
+static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
+{
+ struct hci_dev *hdev = data;
+ struct mgmt_cp_unpair_device *cp = cmd->param;
+ struct mgmt_rp_unpair_device rp;
+
+ memset(&rp, 0, sizeof(rp));
+ bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
+ rp.addr.type = cp->addr.type;
+
+ device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
+
+ cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
+
+ mgmt_pending_remove(cmd);
+}
+
+void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 reason,
+ bool mgmt_connected)
+{
+ struct mgmt_ev_device_disconnected ev;
+ struct pending_cmd *power_off;
+ struct sock *sk = NULL;
+
+ power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
+ if (power_off) {
+ struct mgmt_mode *cp = power_off->param;
+
+ /* The connection is still in hci_conn_hash so test for 1
+ * instead of 0 to know if this is the last one.
+ */
+ if (!cp->val && hci_conn_count(hdev) == 1) {
+ cancel_delayed_work(&hdev->power_off);
+ queue_work(hdev->req_workqueue, &hdev->power_off.work);
+ }
+ }
+
+ if (!mgmt_connected)
+ return;
+
+ if (link_type != ACL_LINK && link_type != LE_LINK)
+ return;
+
+ mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
+
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
+ ev.reason = reason;
+
+ mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
+
+ if (sk)
+ sock_put(sk);
+
+ mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
+ hdev);
+}
+
+void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status)
+{
+ u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
+ struct mgmt_cp_disconnect *cp;
+ struct mgmt_rp_disconnect rp;
+ struct pending_cmd *cmd;
+
+ mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
+ hdev);
+
+ cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
+ if (!cmd)
+ return;
+
+ cp = cmd->param;
+
+ if (bacmp(bdaddr, &cp->addr.bdaddr))
+ return;
+
+ if (cp->addr.type != bdaddr_type)
+ return;
+
+ bacpy(&rp.addr.bdaddr, bdaddr);
+ rp.addr.type = bdaddr_type;
+
+ cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
+ mgmt_status(status), &rp, sizeof(rp));
+
+ mgmt_pending_remove(cmd);
+}
+
+void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u8 status)
+{
+ struct mgmt_ev_connect_failed ev;
+ struct pending_cmd *power_off;
+
+ power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
+ if (power_off) {
+ struct mgmt_mode *cp = power_off->param;
+
+ /* The connection is still in hci_conn_hash so test for 1
+ * instead of 0 to know if this is the last one.
+ */
+ if (!cp->val && hci_conn_count(hdev) == 1) {
+ cancel_delayed_work(&hdev->power_off);
+ queue_work(hdev->req_workqueue, &hdev->power_off.work);
+ }
+ }
+
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
+ ev.status = mgmt_status(status);
+
+ mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
+}
+
+void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
+{
+ struct mgmt_ev_pin_code_request ev;
+
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = BDADDR_BREDR;
+ ev.secure = secure;
+
+ mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
+}
+
+void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status)
+{
+ struct pending_cmd *cmd;
+ struct mgmt_rp_pin_code_reply rp;
+
+ cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
+ if (!cmd)
+ return;
+
+ bacpy(&rp.addr.bdaddr, bdaddr);
+ rp.addr.type = BDADDR_BREDR;
+
+ cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
+ mgmt_status(status), &rp, sizeof(rp));
+
+ mgmt_pending_remove(cmd);
+}
+
+void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 status)
+{
+ struct pending_cmd *cmd;
+ struct mgmt_rp_pin_code_reply rp;
+
+ cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
+ if (!cmd)
+ return;
+
+ bacpy(&rp.addr.bdaddr, bdaddr);
+ rp.addr.type = BDADDR_BREDR;
+
+ cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
+ mgmt_status(status), &rp, sizeof(rp));
+
+ mgmt_pending_remove(cmd);
+}
+
+int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u32 value,
+ u8 confirm_hint)
+{
+ struct mgmt_ev_user_confirm_request ev;
+
+ BT_DBG("%s", hdev->name);
+
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
+ ev.confirm_hint = confirm_hint;
+ ev.value = cpu_to_le32(value);
+
+ return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
+ NULL);
+}
+
+int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type)
+{
+ struct mgmt_ev_user_passkey_request ev;
+
+ BT_DBG("%s", hdev->name);
+
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
+
+ return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
+ NULL);
+}
+
+static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status,
+ u8 opcode)
+{
+ struct pending_cmd *cmd;
+ struct mgmt_rp_user_confirm_reply rp;
+ int err;
+
+ cmd = mgmt_pending_find(opcode, hdev);
+ if (!cmd)
+ return -ENOENT;
+
+ bacpy(&rp.addr.bdaddr, bdaddr);
+ rp.addr.type = link_to_bdaddr(link_type, addr_type);
+ err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
+ &rp, sizeof(rp));
+
+ mgmt_pending_remove(cmd);
+
+ return err;
+}
+
+int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status)
+{
+ return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
+ status, MGMT_OP_USER_CONFIRM_REPLY);
+}
+
+int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status)
+{
+ return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
+ status,
+ MGMT_OP_USER_CONFIRM_NEG_REPLY);
+}
+
+int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status)
+{
+ return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
+ status, MGMT_OP_USER_PASSKEY_REPLY);
+}
+
+int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u8 status)
+{
+ return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
+ status,
+ MGMT_OP_USER_PASSKEY_NEG_REPLY);
+}
+
+int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 link_type, u8 addr_type, u32 passkey,
+ u8 entered)
+{
+ struct mgmt_ev_passkey_notify ev;
+
+ BT_DBG("%s", hdev->name);
+
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
+ ev.passkey = __cpu_to_le32(passkey);
+ ev.entered = entered;
+
+ return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
+}
+
+void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u8 status)
+{
+ struct mgmt_ev_auth_failed ev;
+
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
+ ev.status = mgmt_status(status);
+
+ mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
+}
+
+void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
+{
+ struct cmd_lookup match = { NULL, hdev };
+ bool changed;
+
+ if (status) {
+ u8 mgmt_err = mgmt_status(status);
+ mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
+ cmd_status_rsp, &mgmt_err);
+ return;
+ }
+
+ if (test_bit(HCI_AUTH, &hdev->flags))
+ changed = !test_and_set_bit(HCI_LINK_SECURITY,
+ &hdev->dev_flags);
+ else
+ changed = test_and_clear_bit(HCI_LINK_SECURITY,
+ &hdev->dev_flags);
+
+ mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
+ &match);
+
+ if (changed)
+ new_settings(hdev, match.sk);
+
+ if (match.sk)
+ sock_put(match.sk);
+}
+
+static void clear_eir(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct hci_cp_write_eir cp;
+
+ if (!lmp_ext_inq_capable(hdev))
+ return;
+
+ memset(hdev->eir, 0, sizeof(hdev->eir));
+
+ memset(&cp, 0, sizeof(cp));
+
+ hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
+}
+
+void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
+{
+ struct cmd_lookup match = { NULL, hdev };
+ struct hci_request req;
+ bool changed = false;
+
+ if (status) {
+ u8 mgmt_err = mgmt_status(status);
+
+ if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
+ &hdev->dev_flags)) {
+ clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+ new_settings(hdev, NULL);
+ }
+
+ mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
+ &mgmt_err);
+ return;
+ }
+
+ if (enable) {
+ changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
+ } else {
+ changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
+ if (!changed)
+ changed = test_and_clear_bit(HCI_HS_ENABLED,
+ &hdev->dev_flags);
+ else
+ clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+ }
+
+ mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
+
+ if (changed)
+ new_settings(hdev, match.sk);
+
+ if (match.sk)
+ sock_put(match.sk);
+
+ hci_req_init(&req, hdev);
+
+ if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
+ update_eir(&req);
+ else
+ clear_eir(&req);
+
+ hci_req_run(&req, NULL);
+}
+
+void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
+{
+ struct cmd_lookup match = { NULL, hdev };
+ bool changed = false;
+
+ if (status) {
+ u8 mgmt_err = mgmt_status(status);
+
+ if (enable) {
+ if (test_and_clear_bit(HCI_SC_ENABLED,
+ &hdev->dev_flags))
+ new_settings(hdev, NULL);
+ clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
+ }
+
+ mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
+ cmd_status_rsp, &mgmt_err);
+ return;
+ }
+
+ if (enable) {
+ changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
+ } else {
+ changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
+ clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
+ }
+
+ mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
+ settings_rsp, &match);
+
+ if (changed)
+ new_settings(hdev, match.sk);
+
+ if (match.sk)
+ sock_put(match.sk);
+}
+
+static void sk_lookup(struct pending_cmd *cmd, void *data)
+{
+ struct cmd_lookup *match = data;
+
+ if (match->sk == NULL) {
+ match->sk = cmd->sk;
+ sock_hold(match->sk);
+ }
+}
+
+void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
+ u8 status)
+{
+ struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
+
+ mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
+ mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
+ mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
+
+ if (!status)
+ mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
+ NULL);
+
+ if (match.sk)
+ sock_put(match.sk);
+}
+
+void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
+{
+ struct mgmt_cp_set_local_name ev;
+ struct pending_cmd *cmd;
+
+ if (status)
+ return;
+
+ memset(&ev, 0, sizeof(ev));
+ memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
+ memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
+
+ cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
+ if (!cmd) {
+ memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
+
+		/* If this is an HCI command related to powering on the
+ * HCI dev don't send any mgmt signals.
+ */
+ if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
+ return;
+ }
+
+ mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
+ cmd ? cmd->sk : NULL);
+}
+
+void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
+ u8 *randomizer192, u8 *hash256,
+ u8 *randomizer256, u8 status)
+{
+ struct pending_cmd *cmd;
+
+ BT_DBG("%s status %u", hdev->name, status);
+
+ cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
+ if (!cmd)
+ return;
+
+ if (status) {
+ cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+ mgmt_status(status));
+ } else {
+ if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
+ hash256 && randomizer256) {
+ struct mgmt_rp_read_local_oob_ext_data rp;
+
+ memcpy(rp.hash192, hash192, sizeof(rp.hash192));
+ memcpy(rp.randomizer192, randomizer192,
+ sizeof(rp.randomizer192));
+
+ memcpy(rp.hash256, hash256, sizeof(rp.hash256));
+ memcpy(rp.randomizer256, randomizer256,
+ sizeof(rp.randomizer256));
+
+ cmd_complete(cmd->sk, hdev->id,
+ MGMT_OP_READ_LOCAL_OOB_DATA, 0,
+ &rp, sizeof(rp));
+ } else {
+ struct mgmt_rp_read_local_oob_data rp;
+
+ memcpy(rp.hash, hash192, sizeof(rp.hash));
+ memcpy(rp.randomizer, randomizer192,
+ sizeof(rp.randomizer));
+
+ cmd_complete(cmd->sk, hdev->id,
+ MGMT_OP_READ_LOCAL_OOB_DATA, 0,
+ &rp, sizeof(rp));
+ }
+ }
+
+ mgmt_pending_remove(cmd);
+}
+
+void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
+ u8 ssp, u8 *eir, u16 eir_len, u8 *scan_rsp,
+ u8 scan_rsp_len)
+{
+ char buf[512];
+ struct mgmt_ev_device_found *ev = (void *) buf;
+ struct smp_irk *irk;
+ size_t ev_size;
+
+ if (!hci_discovery_active(hdev))
+ return;
+
+ /* Make sure that the buffer is big enough. The 5 extra bytes
+ * are for the potential CoD field.
+ */
+ if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
+ return;
+
+ memset(buf, 0, sizeof(buf));
+
+ irk = hci_get_irk(hdev, bdaddr, addr_type);
+ if (irk) {
+ bacpy(&ev->addr.bdaddr, &irk->bdaddr);
+ ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
+ } else {
+ bacpy(&ev->addr.bdaddr, bdaddr);
+ ev->addr.type = link_to_bdaddr(link_type, addr_type);
+ }
+
+ ev->rssi = rssi;
+ if (cfm_name)
+ ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
+ if (!ssp)
+ ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
+
+ if (eir_len > 0)
+ memcpy(ev->eir, eir, eir_len);
+
+ if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
+ eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
+ dev_class, 3);
+
+ if (scan_rsp_len > 0)
+ memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
+
+ ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
+ ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
+
+ mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
+}
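
The "+ 5" headroom in the size check above is the worst case added later in
the function when a Class of Device entry has to be appended: one length
octet, one type octet and three CoD bytes. Stated as a sketch:

	/* Sketch: worst-case growth of the event buffer. An EIR entry
	 * is 1 length octet + 1 type octet + payload, so a 3-byte
	 * Class of Device entry needs at most 5 octets on top of
	 * eir_len and scan_rsp_len.
	 */
	#define EIR_COD_ENTRY_LEN (1 + 1 + 3)

	static bool device_found_fits(size_t ev_size, u16 eir_len,
				      u8 scan_rsp_len, size_t buf_size)
	{
		return ev_size + eir_len + scan_rsp_len +
			EIR_COD_ENTRY_LEN <= buf_size;
	}
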
+
+void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ u8 addr_type, s8 rssi, u8 *name, u8 name_len)
+{
+ struct mgmt_ev_device_found *ev;
+ char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
+ u16 eir_len;
+
+ ev = (struct mgmt_ev_device_found *) buf;
+
+ memset(buf, 0, sizeof(buf));
+
+ bacpy(&ev->addr.bdaddr, bdaddr);
+ ev->addr.type = link_to_bdaddr(link_type, addr_type);
+ ev->rssi = rssi;
+
+ eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
+ name_len);
+
+ ev->eir_len = cpu_to_le16(eir_len);
+
+ mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
+}
+
+void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
+{
+ struct mgmt_ev_discovering ev;
+ struct pending_cmd *cmd;
+
+ BT_DBG("%s discovering %u", hdev->name, discovering);
+
+ if (discovering)
+ cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
+ else
+ cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
+
+ if (cmd != NULL) {
+ u8 type = hdev->discovery.type;
+
+ cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
+ sizeof(type));
+ mgmt_pending_remove(cmd);
+ }
+
+ memset(&ev, 0, sizeof(ev));
+ ev.type = hdev->discovery.type;
+ ev.discovering = discovering;
+
+ mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
+}
+
+int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
+{
+ struct pending_cmd *cmd;
+ struct mgmt_ev_device_blocked ev;
+
+ cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
+
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = type;
+
+ return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
+ cmd ? cmd->sk : NULL);
+}
+
+int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
+{
+ struct pending_cmd *cmd;
+ struct mgmt_ev_device_unblocked ev;
+
+ cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
+
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = type;
+
+ return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
+ cmd ? cmd->sk : NULL);
+}
+
+static void adv_enable_complete(struct hci_dev *hdev, u8 status)
+{
+ BT_DBG("%s status %u", hdev->name, status);
+
+ /* Clear the advertising mgmt setting if we failed to re-enable it */
+ if (status) {
+ clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+ new_settings(hdev, NULL);
+ }
+}
+
+void mgmt_reenable_advertising(struct hci_dev *hdev)
+{
+ struct hci_request req;
+
+ if (hci_conn_num(hdev, LE_LINK) > 0)
+ return;
+
+ if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+ return;
+
+ hci_req_init(&req, hdev);
+ enable_advertising(&req);
+
+ /* If this fails we have no option but to let user space know
+ * that we've disabled advertising.
+ */
+ if (hci_req_run(&req, adv_enable_complete) < 0) {
+ clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+ new_settings(hdev, NULL);
+ }
}
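
One pattern worth noting before the RFCOMM changes: the mgmt code above
consistently drives the controller through the hci_request batching API.
hci_req_init() binds a request to the hdev, hci_req_add() queues individual
commands, and hci_req_run() submits the batch with an optional per-request
completion callback (as in mgmt_reenable_advertising() and its
adv_enable_complete()). A condensed sketch of that idiom; the queued command
and the callback body are illustrative:

	static void example_complete(struct hci_dev *hdev, u8 status)
	{
		BT_DBG("%s status %u", hdev->name, status);
	}

	static void example_write_scan(struct hci_dev *hdev)
	{
		struct hci_request req;
		u8 scan = SCAN_PAGE;

		hci_req_init(&req, hdev);
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);

		/* Returns < 0 if nothing was queued or submission
		 * failed.
		 */
		if (hci_req_run(&req, example_complete) < 0)
			BT_DBG("%s request not submitted", hdev->name);
	}
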
diff --git a/net/bluetooth/rfcomm/Kconfig b/net/bluetooth/rfcomm/Kconfig
index 405a0e61e7d..18d352ea2bc 100644
--- a/net/bluetooth/rfcomm/Kconfig
+++ b/net/bluetooth/rfcomm/Kconfig
@@ -1,6 +1,6 @@
config BT_RFCOMM
tristate "RFCOMM protocol support"
- depends on BT && BT_L2CAP
+ depends on BT
help
RFCOMM provides connection oriented stream transport. RFCOMM
support is required for Dialup Networking, OBEX and other Bluetooth
@@ -12,6 +12,7 @@ config BT_RFCOMM
config BT_RFCOMM_TTY
bool "RFCOMM TTY support"
depends on BT_RFCOMM
+ depends on TTY
help
This option enables TTY emulation support for RFCOMM channels.
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 6b83776534f..754b6fe4f74 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -26,22 +26,8 @@
*/
#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <linux/device.h>
#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/net.h>
-#include <linux/mutex.h>
#include <linux/kthread.h>
-#include <linux/slab.h>
-
-#include <net/sock.h>
-#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -51,8 +37,8 @@
#define VERSION "1.11"
-static int disable_cfc;
-static int l2cap_ertm;
+static bool disable_cfc;
+static bool l2cap_ertm;
static int channel_mtu = -1;
static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU;
@@ -62,7 +48,6 @@ static DEFINE_MUTEX(rfcomm_mutex);
#define rfcomm_lock() mutex_lock(&rfcomm_mutex)
#define rfcomm_unlock() mutex_unlock(&rfcomm_mutex)
-static unsigned long rfcomm_event;
static LIST_HEAD(session_list);
@@ -84,7 +69,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
u8 sec_level,
int *err);
static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst);
-static void rfcomm_session_del(struct rfcomm_session *s);
+static struct rfcomm_session *rfcomm_session_del(struct rfcomm_session *s);
/* ---- RFCOMM frame parsing macros ---- */
#define __get_dlci(b) ((b & 0xfc) >> 2)
@@ -116,20 +101,13 @@ static void rfcomm_session_del(struct rfcomm_session *s);
#define __get_rpn_stop_bits(line) (((line) >> 2) & 0x1)
#define __get_rpn_parity(line) (((line) >> 3) & 0x7)
-static inline void rfcomm_schedule(void)
+static void rfcomm_schedule(void)
{
if (!rfcomm_thread)
return;
- set_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event);
wake_up_process(rfcomm_thread);
}
-static inline void rfcomm_session_put(struct rfcomm_session *s)
-{
- if (atomic_dec_and_test(&s->refcnt))
- rfcomm_session_del(s);
-}
-
/* ---- RFCOMM FCS computation ---- */
/* reversed, 8-bit, poly=0x07 */
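
For context on the comment above: the FCS defined by TS 07.10 is a
bit-reflected CRC-8 with polynomial 0x07 (0xe0 in reflected form), initialized
to 0xff and transmitted complemented; the core computes it through a 256-entry
lookup table. A slower but equivalent bitwise sketch:

	/* Sketch: bitwise form of the reversed CRC-8, poly 0x07
	 * (reflected 0xe0), matching the table-driven FCS. Returns
	 * the value as transmitted (ones' complement of the running
	 * CRC).
	 */
	static u8 fcs_bitwise(const u8 *data, int len)
	{
		u8 crc = 0xff;

		while (len--) {
			int i;

			crc ^= *data++;
			for (i = 0; i < 8; i++)
				crc = (crc & 0x01) ? (crc >> 1) ^ 0xe0
						   : crc >> 1;
		}

		return 0xff - crc;
	}
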
@@ -208,9 +186,9 @@ static void rfcomm_l2state_change(struct sock *sk)
rfcomm_schedule();
}
-static void rfcomm_l2data_ready(struct sock *sk, int bytes)
+static void rfcomm_l2data_ready(struct sock *sk)
{
- BT_DBG("%p bytes %d", sk, bytes);
+ BT_DBG("%p", sk);
rfcomm_schedule();
}
@@ -229,13 +207,16 @@ static int rfcomm_l2sock_create(struct socket **sock)
return err;
}
-static inline int rfcomm_check_security(struct rfcomm_dlc *d)
+static int rfcomm_check_security(struct rfcomm_dlc *d)
{
struct sock *sk = d->session->sock->sk;
+ struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
+
__u8 auth_type;
switch (d->sec_level) {
case BT_SECURITY_HIGH:
+ case BT_SECURITY_FIPS:
auth_type = HCI_AT_GENERAL_BONDING_MITM;
break;
case BT_SECURITY_MEDIUM:
@@ -246,8 +227,7 @@ static inline int rfcomm_check_security(struct rfcomm_dlc *d)
break;
}
- return hci_conn_security(l2cap_pi(sk)->conn->hcon, d->sec_level,
- auth_type);
+ return hci_conn_security(conn->hcon, d->sec_level, auth_type);
}
static void rfcomm_session_timeout(unsigned long arg)
@@ -264,16 +244,14 @@ static void rfcomm_session_set_timer(struct rfcomm_session *s, long timeout)
{
BT_DBG("session %p state %ld timeout %ld", s, s->state, timeout);
- if (!mod_timer(&s->timer, jiffies + timeout))
- rfcomm_session_hold(s);
+ mod_timer(&s->timer, jiffies + timeout);
}
static void rfcomm_session_clear_timer(struct rfcomm_session *s)
{
BT_DBG("session %p state %ld", s, s->state);
- if (timer_pending(&s->timer) && del_timer(&s->timer))
- rfcomm_session_put(s);
+ del_timer_sync(&s->timer);
}
/* ---- RFCOMM DLCs ---- */
@@ -300,7 +278,7 @@ static void rfcomm_dlc_clear_timer(struct rfcomm_dlc *d)
{
BT_DBG("dlc %p state %ld", d, d->state);
- if (timer_pending(&d->timer) && del_timer(&d->timer))
+ if (del_timer(&d->timer))
rfcomm_dlc_put(d);
}
@@ -329,7 +307,7 @@ struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio)
setup_timer(&d->timer, rfcomm_dlc_timeout, (unsigned long)d);
skb_queue_head_init(&d->tx_queue);
- spin_lock_init(&d->lock);
+ mutex_init(&d->lock);
atomic_set(&d->refcnt, 1);
rfcomm_dlc_clear_state(d);
@@ -351,8 +329,6 @@ static void rfcomm_dlc_link(struct rfcomm_session *s, struct rfcomm_dlc *d)
{
BT_DBG("dlc %p session %p", d, s);
- rfcomm_session_hold(s);
-
rfcomm_session_clear_timer(s);
rfcomm_dlc_hold(d);
list_add(&d->list, &s->dlcs);
@@ -371,33 +347,34 @@ static void rfcomm_dlc_unlink(struct rfcomm_dlc *d)
if (list_empty(&s->dlcs))
rfcomm_session_set_timer(s, RFCOMM_IDLE_TIMEOUT);
-
- rfcomm_session_put(s);
}
static struct rfcomm_dlc *rfcomm_dlc_get(struct rfcomm_session *s, u8 dlci)
{
struct rfcomm_dlc *d;
- struct list_head *p;
- list_for_each(p, &s->dlcs) {
- d = list_entry(p, struct rfcomm_dlc, list);
+ list_for_each_entry(d, &s->dlcs, list)
if (d->dlci == dlci)
return d;
- }
+
return NULL;
}
+static int rfcomm_check_channel(u8 channel)
+{
+ return channel < 1 || channel > 30;
+}
+
static int __rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, u8 channel)
{
struct rfcomm_session *s;
int err = 0;
u8 dlci;
- BT_DBG("dlc %p state %ld %s %s channel %d",
- d, d->state, batostr(src), batostr(dst), channel);
+ BT_DBG("dlc %p state %ld %pMR -> %pMR channel %d",
+ d, d->state, src, dst, channel);
- if (channel < 1 || channel > 30)
+ if (rfcomm_check_channel(channel))
return -EINVAL;
if (d->state != BT_OPEN && d->state != BT_CLOSED)
@@ -454,6 +431,20 @@ int rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, u8 chann
return r;
}
+static void __rfcomm_dlc_disconn(struct rfcomm_dlc *d)
+{
+ struct rfcomm_session *s = d->session;
+
+ d->state = BT_DISCONN;
+ if (skb_queue_empty(&d->tx_queue)) {
+ rfcomm_send_disc(s, d->dlci);
+ rfcomm_dlc_set_timer(d, RFCOMM_DISC_TIMEOUT);
+ } else {
+ rfcomm_queue_disc(d);
+ rfcomm_dlc_set_timer(d, RFCOMM_DISC_TIMEOUT * 2);
+ }
+}
+
static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err)
{
struct rfcomm_session *s = d->session;
@@ -466,32 +457,29 @@ static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err)
switch (d->state) {
case BT_CONNECT:
case BT_CONFIG:
+ case BT_OPEN:
+ case BT_CONNECT2:
if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
set_bit(RFCOMM_AUTH_REJECT, &d->flags);
rfcomm_schedule();
- break;
+ return 0;
}
- /* Fall through */
+ }
+ switch (d->state) {
+ case BT_CONNECT:
case BT_CONNECTED:
- d->state = BT_DISCONN;
- if (skb_queue_empty(&d->tx_queue)) {
- rfcomm_send_disc(s, d->dlci);
- rfcomm_dlc_set_timer(d, RFCOMM_DISC_TIMEOUT);
- } else {
- rfcomm_queue_disc(d);
- rfcomm_dlc_set_timer(d, RFCOMM_DISC_TIMEOUT * 2);
- }
+ __rfcomm_dlc_disconn(d);
break;
- case BT_OPEN:
- case BT_CONNECT2:
- if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
- set_bit(RFCOMM_AUTH_REJECT, &d->flags);
- rfcomm_schedule();
+ case BT_CONFIG:
+ if (s->state != BT_BOUND) {
+ __rfcomm_dlc_disconn(d);
break;
}
- /* Fall through */
+	/* If closing a dlc in a session that hasn't been started,
+	 * just close and unlink the dlc.
+ */
default:
rfcomm_dlc_clear_timer(d);
@@ -510,16 +498,57 @@ static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err)
int rfcomm_dlc_close(struct rfcomm_dlc *d, int err)
{
- int r;
+ int r = 0;
+ struct rfcomm_dlc *d_list;
+ struct rfcomm_session *s, *s_list;
+
+ BT_DBG("dlc %p state %ld dlci %d err %d", d, d->state, d->dlci, err);
rfcomm_lock();
- r = __rfcomm_dlc_close(d, err);
+ s = d->session;
+ if (!s)
+ goto no_session;
+
+	/* After waiting on the mutex, check that the session still
+	 * exists and then that the dlc still exists.
+ */
+ list_for_each_entry(s_list, &session_list, list) {
+ if (s_list == s) {
+ list_for_each_entry(d_list, &s->dlcs, list) {
+ if (d_list == d) {
+ r = __rfcomm_dlc_close(d, err);
+ break;
+ }
+ }
+ break;
+ }
+ }
+no_session:
rfcomm_unlock();
return r;
}
+struct rfcomm_dlc *rfcomm_dlc_exists(bdaddr_t *src, bdaddr_t *dst, u8 channel)
+{
+ struct rfcomm_session *s;
+ struct rfcomm_dlc *dlc = NULL;
+ u8 dlci;
+
+ if (rfcomm_check_channel(channel))
+ return ERR_PTR(-EINVAL);
+
+ rfcomm_lock();
+ s = rfcomm_session_get(src, dst);
+ if (s) {
+ dlci = __dlci(!s->initiator, channel);
+ dlc = rfcomm_dlc_get(s, dlci);
+ }
+ rfcomm_unlock();
+ return dlc;
+}
+
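
In rfcomm_dlc_exists() the DLCI is derived from the server channel and the
session role. Per TS 07.10 the DLCI is the channel shifted left by one with a
direction bit in the low position, which is presumably what the __dlci() macro
encodes (its definition sits outside this hunk); it is the inverse of the
__get_dlci() address-octet extraction shown earlier. A hedged sketch of that
packing, with illustrative helper names:

	/* Sketch of the assumed TS 07.10 relation behind __dlci() and
	 * __get_dlci(): DLCI = (channel << 1) | direction, and the
	 * frame address octet packs EA | C/R | DLCI << 2.
	 */
	static u8 dlci_from_channel(u8 channel, u8 direction)
	{
		return (channel << 1) | (direction & 0x01);
	}

	static u8 addr_octet(u8 dlci, bool cr)
	{
		return 0x01 | (cr ? 0x02 : 0x00) | (dlci << 2);
	}
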
int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb)
{
int len = skb->len;
@@ -540,6 +569,20 @@ int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb)
return len;
}
+void rfcomm_dlc_send_noerror(struct rfcomm_dlc *d, struct sk_buff *skb)
+{
+ int len = skb->len;
+
+ BT_DBG("dlc %p mtu %d len %d", d, d->mtu, len);
+
+ rfcomm_make_uih(skb, d->addr);
+ skb_queue_tail(&d->tx_queue, skb);
+
+ if (d->state == BT_CONNECTED &&
+ !test_bit(RFCOMM_TX_THROTTLED, &d->flags))
+ rfcomm_schedule();
+}
+
void __rfcomm_dlc_throttle(struct rfcomm_dlc *d)
{
BT_DBG("dlc %p state %ld", d, d->state);
@@ -626,7 +669,7 @@ static struct rfcomm_session *rfcomm_session_add(struct socket *sock, int state)
return s;
}
-static void rfcomm_session_del(struct rfcomm_session *s)
+static struct rfcomm_session *rfcomm_session_del(struct rfcomm_session *s)
{
int state = s->state;
@@ -634,44 +677,42 @@ static void rfcomm_session_del(struct rfcomm_session *s)
list_del(&s->list);
- if (state == BT_CONNECTED)
- rfcomm_send_disc(s, 0);
-
rfcomm_session_clear_timer(s);
sock_release(s->sock);
kfree(s);
if (state != BT_LISTEN)
module_put(THIS_MODULE);
+
+ return NULL;
}
static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst)
{
struct rfcomm_session *s;
struct list_head *p, *n;
- struct bt_sock *sk;
+ struct l2cap_chan *chan;
list_for_each_safe(p, n, &session_list) {
s = list_entry(p, struct rfcomm_session, list);
- sk = bt_sk(s->sock->sk);
+ chan = l2cap_pi(s->sock->sk)->chan;
- if ((!bacmp(src, BDADDR_ANY) || !bacmp(&sk->src, src)) &&
- !bacmp(&sk->dst, dst))
+ if ((!bacmp(src, BDADDR_ANY) || !bacmp(&chan->src, src)) &&
+ !bacmp(&chan->dst, dst))
return s;
}
return NULL;
}
-static void rfcomm_session_close(struct rfcomm_session *s, int err)
+static struct rfcomm_session *rfcomm_session_close(struct rfcomm_session *s,
+ int err)
{
struct rfcomm_dlc *d;
struct list_head *p, *n;
- BT_DBG("session %p state %ld err %d", s, s->state, err);
-
- rfcomm_session_hold(s);
-
s->state = BT_CLOSED;
+ BT_DBG("session %p state %ld err %d", s, s->state, err);
+
/* Close all dlcs */
list_for_each_safe(p, n, &s->dlcs) {
d = list_entry(p, struct rfcomm_dlc, list);
@@ -680,7 +721,7 @@ static void rfcomm_session_close(struct rfcomm_session *s, int err)
}
rfcomm_session_clear_timer(s);
- rfcomm_session_put(s);
+ return rfcomm_session_del(s);
}
static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
@@ -693,7 +734,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
struct socket *sock;
struct sock *sk;
- BT_DBG("%s %s", batostr(src), batostr(dst));
+ BT_DBG("%pMR -> %pMR", src, dst);
*err = rfcomm_l2sock_create(&sock);
if (*err < 0)
@@ -703,6 +744,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
addr.l2_family = AF_BLUETOOTH;
addr.l2_psm = 0;
addr.l2_cid = 0;
+ addr.l2_bdaddr_type = BDADDR_BREDR;
*err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr));
if (*err < 0)
goto failed;
@@ -710,10 +752,10 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
/* Set L2CAP options */
sk = sock->sk;
lock_sock(sk);
- l2cap_pi(sk)->imtu = l2cap_mtu;
- l2cap_pi(sk)->sec_level = sec_level;
+ l2cap_pi(sk)->chan->imtu = l2cap_mtu;
+ l2cap_pi(sk)->chan->sec_level = sec_level;
if (l2cap_ertm)
- l2cap_pi(sk)->mode = L2CAP_MODE_ERTM;
+ l2cap_pi(sk)->chan->mode = L2CAP_MODE_ERTM;
release_sock(sk);
s = rfcomm_session_add(sock, BT_BOUND);
@@ -728,12 +770,12 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
addr.l2_family = AF_BLUETOOTH;
addr.l2_psm = cpu_to_le16(RFCOMM_PSM);
addr.l2_cid = 0;
+ addr.l2_bdaddr_type = BDADDR_BREDR;
*err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr), O_NONBLOCK);
if (*err == 0 || *err == -EINPROGRESS)
return s;
- rfcomm_session_del(s);
- return NULL;
+ return rfcomm_session_del(s);
failed:
sock_release(sock);
@@ -742,17 +784,16 @@ failed:
void rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src, bdaddr_t *dst)
{
- struct sock *sk = s->sock->sk;
+ struct l2cap_chan *chan = l2cap_pi(s->sock->sk)->chan;
if (src)
- bacpy(src, &bt_sk(sk)->src);
+ bacpy(src, &chan->src);
if (dst)
- bacpy(dst, &bt_sk(sk)->dst);
+ bacpy(dst, &chan->dst);
}
/* ---- RFCOMM frame sending ---- */
static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len)
{
- struct socket *sock = s->sock;
struct kvec iv = { data, len };
struct msghdr msg;
@@ -760,7 +801,14 @@ static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len)
memset(&msg, 0, sizeof(msg));
- return kernel_sendmsg(sock, &msg, &iv, 1, len);
+ return kernel_sendmsg(s->sock, &msg, &iv, 1, len);
+}
+
+static int rfcomm_send_cmd(struct rfcomm_session *s, struct rfcomm_cmd *cmd)
+{
+ BT_DBG("%p cmd %u", s, cmd->ctrl);
+
+ return rfcomm_send_frame(s, (void *) cmd, sizeof(*cmd));
}
static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci)
@@ -774,7 +822,7 @@ static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci)
cmd.len = __len8(0);
cmd.fcs = __fcs2((u8 *) &cmd);
- return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
+ return rfcomm_send_cmd(s, &cmd);
}
static int rfcomm_send_ua(struct rfcomm_session *s, u8 dlci)
@@ -788,7 +836,7 @@ static int rfcomm_send_ua(struct rfcomm_session *s, u8 dlci)
cmd.len = __len8(0);
cmd.fcs = __fcs2((u8 *) &cmd);
- return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
+ return rfcomm_send_cmd(s, &cmd);
}
static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci)
@@ -802,7 +850,7 @@ static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci)
cmd.len = __len8(0);
cmd.fcs = __fcs2((u8 *) &cmd);
- return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
+ return rfcomm_send_cmd(s, &cmd);
}
static int rfcomm_queue_disc(struct rfcomm_dlc *d)
@@ -838,7 +886,7 @@ static int rfcomm_send_dm(struct rfcomm_session *s, u8 dlci)
cmd.len = __len8(0);
cmd.fcs = __fcs2((u8 *) &cmd);
- return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
+ return rfcomm_send_cmd(s, &cmd);
}
static int rfcomm_send_nsc(struct rfcomm_session *s, int cr, u8 type)
@@ -1116,7 +1164,7 @@ static void rfcomm_make_uih(struct sk_buff *skb, u8 addr)
}
/* ---- RFCOMM frame reception ---- */
-static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
+static struct rfcomm_session *rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
{
BT_DBG("session %p state %ld dlci %d", s, s->state, dlci);
@@ -1125,7 +1173,7 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
struct rfcomm_dlc *d = rfcomm_dlc_get(s, dlci);
if (!d) {
rfcomm_send_dm(s, dlci);
- return 0;
+ return s;
}
switch (d->state) {
@@ -1147,6 +1195,7 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
if (list_empty(&s->dlcs)) {
s->state = BT_DISCONN;
rfcomm_send_disc(s, 0);
+ rfcomm_session_clear_timer(s);
}
break;
@@ -1160,19 +1209,14 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
break;
case BT_DISCONN:
- /* When socket is closed and we are not RFCOMM
- * initiator rfcomm_process_rx already calls
- * rfcomm_session_put() */
- if (s->sock->sk->sk_state != BT_CLOSED)
- if (list_empty(&s->dlcs))
- rfcomm_session_put(s);
+ s = rfcomm_session_close(s, ECONNRESET);
break;
}
}
- return 0;
+ return s;
}
-static int rfcomm_recv_dm(struct rfcomm_session *s, u8 dlci)
+static struct rfcomm_session *rfcomm_recv_dm(struct rfcomm_session *s, u8 dlci)
{
int err = 0;
@@ -1196,13 +1240,13 @@ static int rfcomm_recv_dm(struct rfcomm_session *s, u8 dlci)
else
err = ECONNRESET;
- s->state = BT_CLOSED;
- rfcomm_session_close(s, err);
+ s = rfcomm_session_close(s, err);
}
- return 0;
+ return s;
}
-static int rfcomm_recv_disc(struct rfcomm_session *s, u8 dlci)
+static struct rfcomm_session *rfcomm_recv_disc(struct rfcomm_session *s,
+ u8 dlci)
{
int err = 0;
@@ -1231,16 +1275,15 @@ static int rfcomm_recv_disc(struct rfcomm_session *s, u8 dlci)
else
err = ECONNRESET;
- s->state = BT_CLOSED;
- rfcomm_session_close(s, err);
+ s = rfcomm_session_close(s, err);
}
-
- return 0;
+ return s;
}
void rfcomm_dlc_accept(struct rfcomm_dlc *d)
{
struct sock *sk = d->session->sock->sk;
+ struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
BT_DBG("dlc %p", d);
@@ -1254,7 +1297,7 @@ void rfcomm_dlc_accept(struct rfcomm_dlc *d)
rfcomm_dlc_unlock(d);
if (d->role_switch)
- hci_conn_switch_role(l2cap_pi(sk)->conn->hcon, 0x00);
+ hci_conn_switch_role(conn->hcon, 0x00);
rfcomm_send_msc(d->session, 1, d->dlci, d->v24_sig);
}
@@ -1655,11 +1698,18 @@ drop:
return 0;
}
-static int rfcomm_recv_frame(struct rfcomm_session *s, struct sk_buff *skb)
+static struct rfcomm_session *rfcomm_recv_frame(struct rfcomm_session *s,
+ struct sk_buff *skb)
{
struct rfcomm_hdr *hdr = (void *) skb->data;
u8 type, dlci, fcs;
+ if (!s) {
+ /* no session, so free socket data */
+ kfree_skb(skb);
+ return s;
+ }
+
dlci = __get_dlci(hdr->addr);
type = __get_type(hdr->ctrl);
@@ -1670,7 +1720,7 @@ static int rfcomm_recv_frame(struct rfcomm_session *s, struct sk_buff *skb)
if (__check_fcs(skb->data, type, fcs)) {
BT_ERR("bad checksum in packet");
kfree_skb(skb);
- return -EILSEQ;
+ return s;
}
if (__test_ea(hdr->len))
@@ -1686,22 +1736,23 @@ static int rfcomm_recv_frame(struct rfcomm_session *s, struct sk_buff *skb)
case RFCOMM_DISC:
if (__test_pf(hdr->ctrl))
- rfcomm_recv_disc(s, dlci);
+ s = rfcomm_recv_disc(s, dlci);
break;
case RFCOMM_UA:
if (__test_pf(hdr->ctrl))
- rfcomm_recv_ua(s, dlci);
+ s = rfcomm_recv_ua(s, dlci);
break;
case RFCOMM_DM:
- rfcomm_recv_dm(s, dlci);
+ s = rfcomm_recv_dm(s, dlci);
break;
case RFCOMM_UIH:
- if (dlci)
- return rfcomm_recv_data(s, dlci, __test_pf(hdr->ctrl), skb);
-
+ if (dlci) {
+ rfcomm_recv_data(s, dlci, __test_pf(hdr->ctrl), skb);
+ return s;
+ }
rfcomm_recv_mcc(s, skb);
break;
@@ -1710,7 +1761,7 @@ static int rfcomm_recv_frame(struct rfcomm_session *s, struct sk_buff *skb)
break;
}
kfree_skb(skb);
- return 0;
+ return s;
}
/* ---- Connection and data processing ---- */
@@ -1739,7 +1790,7 @@ static void rfcomm_process_connect(struct rfcomm_session *s)
/* Send data queued for the DLC.
* Return number of frames left in the queue.
*/
-static inline int rfcomm_process_tx(struct rfcomm_dlc *d)
+static int rfcomm_process_tx(struct rfcomm_dlc *d)
{
struct sk_buff *skb;
int err;
@@ -1787,7 +1838,7 @@ static inline int rfcomm_process_tx(struct rfcomm_dlc *d)
return skb_queue_len(&d->tx_queue);
}
-static inline void rfcomm_process_dlcs(struct rfcomm_session *s)
+static void rfcomm_process_dlcs(struct rfcomm_session *s)
{
struct rfcomm_dlc *d;
struct list_head *p, *n;
@@ -1802,6 +1853,11 @@ static inline void rfcomm_process_dlcs(struct rfcomm_session *s)
continue;
}
+ if (test_bit(RFCOMM_ENC_DROP, &d->flags)) {
+ __rfcomm_dlc_close(d, ECONNREFUSED);
+ continue;
+ }
+
if (test_and_clear_bit(RFCOMM_AUTH_ACCEPT, &d->flags)) {
rfcomm_dlc_clear_timer(d);
if (d->out) {
@@ -1842,7 +1898,7 @@ static inline void rfcomm_process_dlcs(struct rfcomm_session *s)
}
}
-static inline void rfcomm_process_rx(struct rfcomm_session *s)
+static struct rfcomm_session *rfcomm_process_rx(struct rfcomm_session *s)
{
struct socket *sock = s->sock;
struct sock *sk = sock->sk;
@@ -1853,18 +1909,19 @@ static inline void rfcomm_process_rx(struct rfcomm_session *s)
/* Get data directly from socket receive queue without copying it. */
while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
skb_orphan(skb);
- rfcomm_recv_frame(s, skb);
+ if (!skb_linearize(skb))
+ s = rfcomm_recv_frame(s, skb);
+ else
+ kfree_skb(skb);
}
- if (sk->sk_state == BT_CLOSED) {
- if (!s->initiator)
- rfcomm_session_put(s);
+ if (s && (sk->sk_state == BT_CLOSED))
+ s = rfcomm_session_close(s, sk->sk_err);
- rfcomm_session_close(s, sk->sk_err);
- }
+ return s;
}
-static inline void rfcomm_accept_connection(struct rfcomm_session *s)
+static void rfcomm_accept_connection(struct rfcomm_session *s)
{
struct socket *sock = s->sock, *nsock;
int err;
@@ -1886,18 +1943,17 @@ static inline void rfcomm_accept_connection(struct rfcomm_session *s)
s = rfcomm_session_add(nsock, BT_OPEN);
if (s) {
- rfcomm_session_hold(s);
-
/* We should adjust MTU on incoming sessions.
* L2CAP MTU minus UIH header and FCS. */
- s->mtu = min(l2cap_pi(nsock->sk)->omtu, l2cap_pi(nsock->sk)->imtu) - 5;
+ s->mtu = min(l2cap_pi(nsock->sk)->chan->omtu,
+ l2cap_pi(nsock->sk)->chan->imtu) - 5;
rfcomm_schedule();
} else
sock_release(nsock);
}
-static inline void rfcomm_check_connection(struct rfcomm_session *s)
+static struct rfcomm_session *rfcomm_check_connection(struct rfcomm_session *s)
{
struct sock *sk = s->sock->sk;
@@ -1909,19 +1965,19 @@ static inline void rfcomm_check_connection(struct rfcomm_session *s)
/* We can adjust MTU on outgoing sessions.
* L2CAP MTU minus UIH header and FCS. */
- s->mtu = min(l2cap_pi(sk)->omtu, l2cap_pi(sk)->imtu) - 5;
+ s->mtu = min(l2cap_pi(sk)->chan->omtu, l2cap_pi(sk)->chan->imtu) - 5;
rfcomm_send_sabm(s, 0);
break;
case BT_CLOSED:
- s->state = BT_CLOSED;
- rfcomm_session_close(s, sk->sk_err);
+ s = rfcomm_session_close(s, sk->sk_err);
break;
}
+ return s;
}
-static inline void rfcomm_process_sessions(void)
+static void rfcomm_process_sessions(void)
{
struct list_head *p, *n;
@@ -1934,30 +1990,25 @@ static inline void rfcomm_process_sessions(void)
if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) {
s->state = BT_DISCONN;
rfcomm_send_disc(s, 0);
- rfcomm_session_put(s);
continue;
}
- if (s->state == BT_LISTEN) {
+ switch (s->state) {
+ case BT_LISTEN:
rfcomm_accept_connection(s);
continue;
- }
-
- rfcomm_session_hold(s);
- switch (s->state) {
case BT_BOUND:
- rfcomm_check_connection(s);
+ s = rfcomm_check_connection(s);
break;
default:
- rfcomm_process_rx(s);
+ s = rfcomm_process_rx(s);
break;
}
- rfcomm_process_dlcs(s);
-
- rfcomm_session_put(s);
+ if (s)
+ rfcomm_process_dlcs(s);
}
rfcomm_unlock();
@@ -1983,6 +2034,7 @@ static int rfcomm_add_listener(bdaddr_t *ba)
addr.l2_family = AF_BLUETOOTH;
addr.l2_psm = cpu_to_le16(RFCOMM_PSM);
addr.l2_cid = 0;
+ addr.l2_bdaddr_type = BDADDR_BREDR;
err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr));
if (err < 0) {
BT_ERR("Bind failed %d", err);
@@ -1992,7 +2044,7 @@ static int rfcomm_add_listener(bdaddr_t *ba)
/* Set L2CAP options */
sk = sock->sk;
lock_sock(sk);
- l2cap_pi(sk)->imtu = l2cap_mtu;
+ l2cap_pi(sk)->chan->imtu = l2cap_mtu;
release_sock(sk);
/* Start listening on the socket */
@@ -2004,10 +2056,11 @@ static int rfcomm_add_listener(bdaddr_t *ba)
/* Add listening session */
s = rfcomm_session_add(sock, BT_LISTEN);
- if (!s)
+ if (!s) {
+ err = -ENOMEM;
goto failed;
+ }
- rfcomm_session_hold(s);
return 0;
failed:
sock_release(sock);
@@ -2035,19 +2088,18 @@ static int rfcomm_run(void *unused)
rfcomm_add_listener(BDADDR_ANY);
- while (!kthread_should_stop()) {
+ while (1) {
set_current_state(TASK_INTERRUPTIBLE);
- if (!test_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event)) {
- /* No pending events. Let's sleep.
- * Incoming connections and data will wake us up. */
- schedule();
- }
- set_current_state(TASK_RUNNING);
+
+ if (kthread_should_stop())
+ break;
/* Process stuff */
- clear_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event);
rfcomm_process_sessions();
+
+ schedule();
}
+ __set_current_state(TASK_RUNNING);
rfcomm_kill_listener();
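
The reworked rfcomm_run() loop sets TASK_INTERRUPTIBLE before checking
kthread_should_stop(), the standard way to avoid a lost wakeup: a
wake_up_process() arriving after the checks but before schedule() puts the
task back to TASK_RUNNING, so schedule() returns promptly instead of sleeping
through the event. The general shape, with an illustrative work function
standing in for rfcomm_process_sessions():

	/* Sketch of the race-free kthread loop shape used by
	 * rfcomm_run(). do_pending_work() is illustrative.
	 */
	static int worker_fn(void *unused)
	{
		while (1) {
			set_current_state(TASK_INTERRUPTIBLE);

			if (kthread_should_stop())
				break;

			do_pending_work();

			schedule();
		}
		__set_current_state(TASK_RUNNING);

		return 0;
	}
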
@@ -2066,15 +2118,13 @@ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
if (!s)
return;
- rfcomm_session_hold(s);
-
list_for_each_safe(p, n, &s->dlcs) {
d = list_entry(p, struct rfcomm_dlc, list);
if (test_and_clear_bit(RFCOMM_SEC_PENDING, &d->flags)) {
rfcomm_dlc_clear_timer(d);
if (status || encrypt == 0x00) {
- __rfcomm_dlc_close(d, ECONNREFUSED);
+ set_bit(RFCOMM_ENC_DROP, &d->flags);
continue;
}
}
@@ -2084,8 +2134,9 @@ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
set_bit(RFCOMM_SEC_PENDING, &d->flags);
rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT);
continue;
- } else if (d->sec_level == BT_SECURITY_HIGH) {
- __rfcomm_dlc_close(d, ECONNREFUSED);
+ } else if (d->sec_level == BT_SECURITY_HIGH ||
+ d->sec_level == BT_SECURITY_FIPS) {
+ set_bit(RFCOMM_ENC_DROP, &d->flags);
continue;
}
}
@@ -2093,14 +2144,12 @@ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
if (!test_and_clear_bit(RFCOMM_AUTH_PENDING, &d->flags))
continue;
- if (!status)
+ if (!status && hci_conn_check_secure(conn, d->sec_level))
set_bit(RFCOMM_AUTH_ACCEPT, &d->flags);
else
set_bit(RFCOMM_AUTH_REJECT, &d->flags);
}
- rfcomm_session_put(s);
-
rfcomm_schedule();
}
@@ -2112,21 +2161,17 @@ static struct hci_cb rfcomm_cb = {
static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x)
{
struct rfcomm_session *s;
- struct list_head *pp, *p;
rfcomm_lock();
- list_for_each(p, &session_list) {
- s = list_entry(p, struct rfcomm_session, list);
- list_for_each(pp, &s->dlcs) {
- struct sock *sk = s->sock->sk;
- struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list);
-
- seq_printf(f, "%s %s %ld %d %d %d %d\n",
- batostr(&bt_sk(sk)->src),
- batostr(&bt_sk(sk)->dst),
- d->state, d->dlci, d->mtu,
- d->rx_credits, d->tx_credits);
+ list_for_each_entry(s, &session_list, list) {
+ struct l2cap_chan *chan = l2cap_pi(s->sock->sk)->chan;
+ struct rfcomm_dlc *d;
+ list_for_each_entry(d, &s->dlcs, list) {
+ seq_printf(f, "%pMR %pMR %ld %d %d %d %d\n",
+ &chan->src, &chan->dst,
+ d->state, d->dlci, d->mtu,
+ d->rx_credits, d->tx_credits);
}
}
@@ -2154,8 +2199,6 @@ static int __init rfcomm_init(void)
{
int err;
- l2cap_load();
-
hci_register_cb(&rfcomm_cb);
rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd");
@@ -2164,13 +2207,6 @@ static int __init rfcomm_init(void)
goto unregister;
}
- if (bt_debugfs) {
- rfcomm_dlc_debugfs = debugfs_create_file("rfcomm_dlc", 0444,
- bt_debugfs, NULL, &rfcomm_dlc_debugfs_fops);
- if (!rfcomm_dlc_debugfs)
- BT_ERR("Failed to create RFCOMM debug file");
- }
-
err = rfcomm_init_ttys();
if (err < 0)
goto stop;
@@ -2181,6 +2217,13 @@ static int __init rfcomm_init(void)
BT_INFO("RFCOMM ver %s", VERSION);
+ if (IS_ERR_OR_NULL(bt_debugfs))
+ return 0;
+
+ rfcomm_dlc_debugfs = debugfs_create_file("rfcomm_dlc", 0444,
+ bt_debugfs, NULL,
+ &rfcomm_dlc_debugfs_fops);
+
return 0;
cleanup:
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 66cc1f0c3df..c603a5eb472 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -25,27 +25,8 @@
* RFCOMM sockets.
*/
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/socket.h>
-#include <linux/skbuff.h>
-#include <linux/list.h>
-#include <linux/device.h>
+#include <linux/export.h>
#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <net/sock.h>
-
-#include <asm/system.h>
-#include <linux/uaccess.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -73,7 +54,7 @@ static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb)
atomic_add(skb->len, &sk->sk_rmem_alloc);
skb_queue_tail(&sk->sk_receive_queue, skb);
- sk->sk_data_ready(sk, skb->len);
+ sk->sk_data_ready(sk);
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
rfcomm_dlc_throttle(d);
@@ -103,10 +84,11 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
sock_set_flag(sk, SOCK_ZAPPED);
bt_accept_unlink(sk);
}
- parent->sk_data_ready(parent, 0);
+ parent->sk_data_ready(parent);
} else {
if (d->state == BT_CONNECTED)
- rfcomm_session_getaddr(d->session, &bt_sk(sk)->src, NULL);
+ rfcomm_session_getaddr(d->session,
+ &rfcomm_pi(sk)->src, NULL);
sk->sk_state_change(sk);
}
@@ -123,18 +105,22 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
}
/* ---- Socket functions ---- */
-static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src)
+static struct sock *__rfcomm_get_listen_sock_by_addr(u8 channel, bdaddr_t *src)
{
struct sock *sk = NULL;
- struct hlist_node *node;
- sk_for_each(sk, node, &rfcomm_sk_list.head) {
- if (rfcomm_pi(sk)->channel == channel &&
- !bacmp(&bt_sk(sk)->src, src))
+ sk_for_each(sk, &rfcomm_sk_list.head) {
+ if (rfcomm_pi(sk)->channel != channel)
+ continue;
+
+ if (bacmp(&rfcomm_pi(sk)->src, src))
+ continue;
+
+ if (sk->sk_state == BT_BOUND || sk->sk_state == BT_LISTEN)
break;
}
- return node ? sk : NULL;
+ return sk ? sk : NULL;
}
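
The sk_for_each() calls here reflect the hlist iterator API change that
dropped the separate node cursor: the entry pointer itself is now the loop
variable and is NULL once the list has been fully walked, which is why the
function can return sk directly after the loop. A sketch of the resulting
lookup shape:

	/* Sketch: with the node-less hlist API the cursor is NULL
	 * after a complete walk, so no separate "found" flag is
	 * needed.
	 */
	static struct sock *lookup(u8 channel, const bdaddr_t *src)
	{
		struct sock *sk;

		sk_for_each(sk, &rfcomm_sk_list.head) {
			if (rfcomm_pi(sk)->channel == channel &&
			    !bacmp(&rfcomm_pi(sk)->src, src))
				break;	/* sk remains valid */
		}

		return sk;	/* NULL if no match was found */
	}
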
/* Find socket with channel and source bdaddr.
@@ -143,28 +129,27 @@ static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src)
static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src)
{
struct sock *sk = NULL, *sk1 = NULL;
- struct hlist_node *node;
read_lock(&rfcomm_sk_list.lock);
- sk_for_each(sk, node, &rfcomm_sk_list.head) {
+ sk_for_each(sk, &rfcomm_sk_list.head) {
if (state && sk->sk_state != state)
continue;
if (rfcomm_pi(sk)->channel == channel) {
/* Exact match. */
- if (!bacmp(&bt_sk(sk)->src, src))
+ if (!bacmp(&rfcomm_pi(sk)->src, src))
break;
/* Closest match */
- if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
+ if (!bacmp(&rfcomm_pi(sk)->src, BDADDR_ANY))
sk1 = sk;
}
}
read_unlock(&rfcomm_sk_list.lock);
- return node ? sk : sk1;
+ return sk ? sk : sk1;
}
static void rfcomm_sock_destruct(struct sock *sk)
@@ -260,10 +245,13 @@ static void rfcomm_sock_init(struct sock *sk, struct sock *parent)
if (parent) {
sk->sk_type = parent->sk_type;
- pi->dlc->defer_setup = bt_sk(parent)->defer_setup;
+ pi->dlc->defer_setup = test_bit(BT_SK_DEFER_SETUP,
+ &bt_sk(parent)->flags);
pi->sec_level = rfcomm_pi(parent)->sec_level;
pi->role_switch = rfcomm_pi(parent)->role_switch;
+
+ security_sk_clone(parent, sk);
} else {
pi->dlc->defer_setup = 0;
@@ -348,9 +336,10 @@ static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr
{
struct sockaddr_rc *sa = (struct sockaddr_rc *) addr;
struct sock *sk = sock->sk;
+ int chan = sa->rc_channel;
int err = 0;
- BT_DBG("sk %p %s", sk, batostr(&sa->rc_bdaddr));
+ BT_DBG("sk %p %pMR", sk, &sa->rc_bdaddr);
if (!addr || addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
@@ -367,18 +356,18 @@ static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr
goto done;
}
- write_lock_bh(&rfcomm_sk_list.lock);
+ write_lock(&rfcomm_sk_list.lock);
- if (sa->rc_channel && __rfcomm_get_sock_by_addr(sa->rc_channel, &sa->rc_bdaddr)) {
+ if (chan && __rfcomm_get_listen_sock_by_addr(chan, &sa->rc_bdaddr)) {
err = -EADDRINUSE;
} else {
/* Save source address */
- bacpy(&bt_sk(sk)->src, &sa->rc_bdaddr);
- rfcomm_pi(sk)->channel = sa->rc_channel;
+ bacpy(&rfcomm_pi(sk)->src, &sa->rc_bdaddr);
+ rfcomm_pi(sk)->channel = chan;
sk->sk_state = BT_BOUND;
}
- write_unlock_bh(&rfcomm_sk_list.lock);
+ write_unlock(&rfcomm_sk_list.lock);
done:
release_sock(sk);
@@ -411,13 +400,14 @@ static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int a
}
sk->sk_state = BT_CONNECT;
- bacpy(&bt_sk(sk)->dst, &sa->rc_bdaddr);
+ bacpy(&rfcomm_pi(sk)->dst, &sa->rc_bdaddr);
rfcomm_pi(sk)->channel = sa->rc_channel;
d->sec_level = rfcomm_pi(sk)->sec_level;
d->role_switch = rfcomm_pi(sk)->role_switch;
- err = rfcomm_dlc_open(d, &bt_sk(sk)->src, &sa->rc_bdaddr, sa->rc_channel);
+ err = rfcomm_dlc_open(d, &rfcomm_pi(sk)->src, &sa->rc_bdaddr,
+ sa->rc_channel);
if (!err)
err = bt_sock_wait_state(sk, BT_CONNECTED,
sock_sndtimeo(sk, flags & O_NONBLOCK));
@@ -447,21 +437,21 @@ static int rfcomm_sock_listen(struct socket *sock, int backlog)
}
if (!rfcomm_pi(sk)->channel) {
- bdaddr_t *src = &bt_sk(sk)->src;
+ bdaddr_t *src = &rfcomm_pi(sk)->src;
u8 channel;
err = -EINVAL;
- write_lock_bh(&rfcomm_sk_list.lock);
+ write_lock(&rfcomm_sk_list.lock);
for (channel = 1; channel < 31; channel++)
- if (!__rfcomm_get_sock_by_addr(channel, src)) {
+ if (!__rfcomm_get_listen_sock_by_addr(channel, src)) {
rfcomm_pi(sk)->channel = channel;
err = 0;
break;
}
- write_unlock_bh(&rfcomm_sk_list.lock);
+ write_unlock(&rfcomm_sk_list.lock);
if (err < 0)
goto done;
@@ -483,12 +473,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
long timeo;
int err = 0;
- lock_sock(sk);
-
- if (sk->sk_state != BT_LISTEN) {
- err = -EBADFD;
- goto done;
- }
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
@@ -501,19 +486,20 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
- while (!(nsk = bt_accept_dequeue(sk, newsock))) {
+ while (1) {
set_current_state(TASK_INTERRUPTIBLE);
- if (!timeo) {
- err = -EAGAIN;
+
+ if (sk->sk_state != BT_LISTEN) {
+ err = -EBADFD;
break;
}
- release_sock(sk);
- timeo = schedule_timeout(timeo);
- lock_sock(sk);
+ nsk = bt_accept_dequeue(sk, newsock);
+ if (nsk)
+ break;
- if (sk->sk_state != BT_LISTEN) {
- err = -EBADFD;
+ if (!timeo) {
+ err = -EAGAIN;
break;
}
@@ -521,8 +507,12 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
err = sock_intr_errno(timeo);
break;
}
+
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
}
- set_current_state(TASK_RUNNING);
+ __set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
if (err)
@@ -544,12 +534,17 @@ static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *
BT_DBG("sock %p, sk %p", sock, sk);
+ if (peer && sk->sk_state != BT_CONNECTED &&
+ sk->sk_state != BT_CONNECT && sk->sk_state != BT_CONNECT2)
+ return -ENOTCONN;
+
+ memset(sa, 0, sizeof(*sa));
sa->rc_family = AF_BLUETOOTH;
sa->rc_channel = rfcomm_pi(sk)->channel;
if (peer)
- bacpy(&sa->rc_bdaddr, &bt_sk(sk)->dst);
+ bacpy(&sa->rc_bdaddr, &rfcomm_pi(sk)->dst);
else
- bacpy(&sa->rc_bdaddr, &bt_sk(sk)->src);
+ bacpy(&sa->rc_bdaddr, &rfcomm_pi(sk)->src);
*len = sizeof(struct sockaddr_rc);
return 0;
@@ -561,7 +556,7 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
struct sock *sk = sock->sk;
struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
struct sk_buff *skb;
- int sent = 0;
+ int sent;
if (test_bit(RFCOMM_DEFER_SETUP, &d->flags))
return -ENOTCONN;
@@ -576,6 +571,10 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
lock_sock(sk);
+ sent = bt_sock_wait_ready(sk, msg->msg_flags);
+ if (sent)
+ goto done;
+
while (len) {
size_t size = min_t(size_t, len, d->mtu);
int err;
@@ -597,6 +596,8 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
break;
}
+ skb->priority = sk->sk_priority;
+
err = rfcomm_dlc_send(d, skb);
if (err < 0) {
kfree_skb(skb);
@@ -609,6 +610,7 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
len -= size;
}
+done:
release_sock(sk);
return sent;
@@ -656,6 +658,11 @@ static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __u
break;
}
+ if (opt & RFCOMM_LM_FIPS) {
+ err = -EINVAL;
+ break;
+ }
+
if (opt & RFCOMM_LM_AUTH)
rfcomm_pi(sk)->sec_level = BT_SECURITY_LOW;
if (opt & RFCOMM_LM_ENCRYPT)
@@ -679,7 +686,8 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
{
struct sock *sk = sock->sk;
struct bt_security sec;
- int len, err = 0;
+ int err = 0;
+ size_t len;
u32 opt;
BT_DBG("sk %p", sk);
@@ -726,7 +734,11 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
break;
}
- bt_sk(sk)->defer_setup = opt;
+ if (opt)
+ set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+ else
+ clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+
break;
default:
@@ -742,6 +754,7 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
{
struct sock *sk = sock->sk;
struct sock *l2cap_sk;
+ struct l2cap_conn *conn;
struct rfcomm_conninfo cinfo;
int len, err = 0;
u32 opt;
@@ -764,7 +777,11 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
break;
case BT_SECURITY_HIGH:
opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT |
- RFCOMM_LM_SECURE;
+ RFCOMM_LM_SECURE;
+ break;
+ case BT_SECURITY_FIPS:
+ opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT |
+ RFCOMM_LM_SECURE | RFCOMM_LM_FIPS;
break;
default:
opt = 0;
@@ -776,6 +793,7 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
if (put_user(opt, (u32 __user *) optval))
err = -EFAULT;
+
break;
case RFCOMM_CONNINFO:
@@ -786,9 +804,11 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
}
l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
+ conn = l2cap_pi(l2cap_sk)->chan->conn;
- cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
- memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
+ memset(&cinfo, 0, sizeof(cinfo));
+ cinfo.hci_handle = conn->hcon->handle;
+ memcpy(cinfo.dev_class, conn->hcon->dev_class, 3);
len = min_t(unsigned int, len, sizeof(cinfo));
if (copy_to_user(optval, (char *) &cinfo, len))
@@ -832,6 +852,7 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c
}
sec.level = rfcomm_pi(sk)->sec_level;
+ sec.key_size = 0;
len = min_t(unsigned int, len, sizeof(sec));
if (copy_to_user(optval, (char *) &sec, len))
@@ -845,7 +866,8 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c
break;
}
- if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
+ if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
+ (u32 __user *) optval))
err = -EFAULT;
break;
@@ -951,9 +973,11 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *
if (!sk)
goto done;
+ bt_sock_reclassify_lock(sk, BTPROTO_RFCOMM);
+
rfcomm_sock_init(sk, parent);
- bacpy(&bt_sk(sk)->src, &src);
- bacpy(&bt_sk(sk)->dst, &dst);
+ bacpy(&rfcomm_pi(sk)->src, &src);
+ bacpy(&rfcomm_pi(sk)->dst, &dst);
rfcomm_pi(sk)->channel = channel;
sk->sk_state = BT_CONFIG;
@@ -966,7 +990,7 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *
done:
bh_unlock_sock(parent);
- if (bt_sk(parent)->defer_setup)
+ if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
parent->sk_state_change(parent);
return result;
@@ -975,18 +999,16 @@ done:
static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p)
{
struct sock *sk;
- struct hlist_node *node;
- read_lock_bh(&rfcomm_sk_list.lock);
+ read_lock(&rfcomm_sk_list.lock);
- sk_for_each(sk, node, &rfcomm_sk_list.head) {
- seq_printf(f, "%s %s %d %d\n",
- batostr(&bt_sk(sk)->src),
- batostr(&bt_sk(sk)->dst),
- sk->sk_state, rfcomm_pi(sk)->channel);
+ sk_for_each(sk, &rfcomm_sk_list.head) {
+ seq_printf(f, "%pMR %pMR %d %d\n",
+ &rfcomm_pi(sk)->src, &rfcomm_pi(sk)->dst,
+ sk->sk_state, rfcomm_pi(sk)->channel);
}
- read_unlock_bh(&rfcomm_sk_list.lock);
+ read_unlock(&rfcomm_sk_list.lock);
return 0;
}
@@ -1040,32 +1062,41 @@ int __init rfcomm_init_sockets(void)
return err;
err = bt_sock_register(BTPROTO_RFCOMM, &rfcomm_sock_family_ops);
- if (err < 0)
+ if (err < 0) {
+ BT_ERR("RFCOMM socket layer registration failed");
goto error;
+ }
- if (bt_debugfs) {
- rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444,
- bt_debugfs, NULL, &rfcomm_sock_debugfs_fops);
- if (!rfcomm_sock_debugfs)
- BT_ERR("Failed to create RFCOMM debug file");
+ err = bt_procfs_init(&init_net, "rfcomm", &rfcomm_sk_list, NULL);
+ if (err < 0) {
+ BT_ERR("Failed to create RFCOMM proc file");
+ bt_sock_unregister(BTPROTO_RFCOMM);
+ goto error;
}
BT_INFO("RFCOMM socket layer initialized");
+ if (IS_ERR_OR_NULL(bt_debugfs))
+ return 0;
+
+ rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444,
+ bt_debugfs, NULL,
+ &rfcomm_sock_debugfs_fops);
+
return 0;
error:
- BT_ERR("RFCOMM socket layer registration failed");
proto_unregister(&rfcomm_proto);
return err;
}
void __exit rfcomm_cleanup_sockets(void)
{
+ bt_procfs_cleanup(&init_net, "rfcomm");
+
debugfs_remove(rfcomm_sock_debugfs);
- if (bt_sock_unregister(BTPROTO_RFCOMM) < 0)
- BT_ERR("RFCOMM socket layer unregistration failed");
+ bt_sock_unregister(BTPROTO_RFCOMM);
proto_unregister(&rfcomm_proto);
}
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index d7b9af4703d..8e385a0ae60 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -31,10 +31,6 @@
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
-#include <linux/capability.h>
-#include <linux/slab.h>
-#include <linux/skbuff.h>
-
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/rfcomm.h>
@@ -44,18 +40,20 @@
#define RFCOMM_TTY_MAJOR 216 /* device node major id of the usb/bluetooth.c driver */
#define RFCOMM_TTY_MINOR 0
+static DEFINE_MUTEX(rfcomm_ioctl_mutex);
static struct tty_driver *rfcomm_tty_driver;
struct rfcomm_dev {
+ struct tty_port port;
struct list_head list;
- atomic_t refcnt;
char name[12];
int id;
unsigned long flags;
- atomic_t opened;
int err;
+ unsigned long status; /* don't export to userspace */
+
bdaddr_t src;
bdaddr_t dst;
u8 channel;
@@ -63,9 +61,6 @@ struct rfcomm_dev {
uint modem_status;
struct rfcomm_dlc *dlc;
- struct tty_struct *tty;
- wait_queue_head_t wait;
- struct tasklet_struct wakeup_task;
struct device *tty_dev;
@@ -75,26 +70,21 @@ struct rfcomm_dev {
};
static LIST_HEAD(rfcomm_dev_list);
-static DEFINE_RWLOCK(rfcomm_dev_lock);
+static DEFINE_MUTEX(rfcomm_dev_lock);
static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb);
static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err);
static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig);
-static void rfcomm_tty_wakeup(unsigned long arg);
-
/* ---- Device functions ---- */
-static void rfcomm_dev_destruct(struct rfcomm_dev *dev)
+
+static void rfcomm_dev_destruct(struct tty_port *port)
{
+ struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port);
struct rfcomm_dlc *dlc = dev->dlc;
BT_DBG("dev %p dlc %p", dev, dlc);
- /* Refcount should only hit zero when called from rfcomm_dev_del()
- which will have taken us off the list. Everything else are
- refcounting bugs. */
- BUG_ON(!list_empty(&dev->list));
-
rfcomm_dlc_lock(dlc);
/* Detach DLC if it's owned by this dev */
if (dlc->owner == dev)
@@ -103,7 +93,12 @@ static void rfcomm_dev_destruct(struct rfcomm_dev *dev)
rfcomm_dlc_put(dlc);
- tty_unregister_device(rfcomm_tty_driver, dev->id);
+ if (dev->tty_dev)
+ tty_unregister_device(rfcomm_tty_driver, dev->id);
+
+ mutex_lock(&rfcomm_dev_lock);
+ list_del(&dev->list);
+ mutex_unlock(&rfcomm_dev_lock);
kfree(dev);
@@ -112,78 +107,101 @@ static void rfcomm_dev_destruct(struct rfcomm_dev *dev)
module_put(THIS_MODULE);
}
-static inline void rfcomm_dev_hold(struct rfcomm_dev *dev)
+/* device-specific initialization: open the dlc */
+static int rfcomm_dev_activate(struct tty_port *port, struct tty_struct *tty)
{
- atomic_inc(&dev->refcnt);
+ struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port);
+ int err;
+
+ err = rfcomm_dlc_open(dev->dlc, &dev->src, &dev->dst, dev->channel);
+ if (err)
+ set_bit(TTY_IO_ERROR, &tty->flags);
+ return err;
}
-static inline void rfcomm_dev_put(struct rfcomm_dev *dev)
+/* we block the open until the dlc->state becomes BT_CONNECTED */
+static int rfcomm_dev_carrier_raised(struct tty_port *port)
{
- /* The reason this isn't actually a race, as you no
- doubt have a little voice screaming at you in your
- head, is that the refcount should never actually
- reach zero unless the device has already been taken
- off the list, in rfcomm_dev_del(). And if that's not
- true, we'll hit the BUG() in rfcomm_dev_destruct()
- anyway. */
- if (atomic_dec_and_test(&dev->refcnt))
- rfcomm_dev_destruct(dev);
+ struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port);
+
+ return (dev->dlc->state == BT_CONNECTED);
}
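
tty_port_open() first runs rfcomm_dev_activate() above to open the DLC;
since this patch also drops CLOCAL from the driver's default termios (see
the init_termios change further down), a blocking open() then waits in
tty_port_block_til_ready() until this callback reports carrier, i.e.
until the DLC reaches BT_CONNECTED.
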
-static struct rfcomm_dev *__rfcomm_dev_get(int id)
+/* device-specific cleanup: close the dlc */
+static void rfcomm_dev_shutdown(struct tty_port *port)
+{
+ struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port);
+
+ if (dev->tty_dev->parent)
+ device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
+
+ /* close the dlc */
+ rfcomm_dlc_close(dev->dlc, 0);
+}
+
+static const struct tty_port_operations rfcomm_port_ops = {
+ .destruct = rfcomm_dev_destruct,
+ .activate = rfcomm_dev_activate,
+ .shutdown = rfcomm_dev_shutdown,
+ .carrier_raised = rfcomm_dev_carrier_raised,
+};
+
+static struct rfcomm_dev *__rfcomm_dev_lookup(int id)
{
struct rfcomm_dev *dev;
- struct list_head *p;
- list_for_each(p, &rfcomm_dev_list) {
- dev = list_entry(p, struct rfcomm_dev, list);
+ list_for_each_entry(dev, &rfcomm_dev_list, list)
if (dev->id == id)
return dev;
- }
return NULL;
}
-static inline struct rfcomm_dev *rfcomm_dev_get(int id)
+static struct rfcomm_dev *rfcomm_dev_get(int id)
{
struct rfcomm_dev *dev;
- read_lock(&rfcomm_dev_lock);
+ mutex_lock(&rfcomm_dev_lock);
- dev = __rfcomm_dev_get(id);
+ dev = __rfcomm_dev_lookup(id);
- if (dev) {
- if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags))
- dev = NULL;
- else
- rfcomm_dev_hold(dev);
- }
+ if (dev && !tty_port_get(&dev->port))
+ dev = NULL;
- read_unlock(&rfcomm_dev_lock);
+ mutex_unlock(&rfcomm_dev_lock);
return dev;
}
-static struct device *rfcomm_get_device(struct rfcomm_dev *dev)
+static void rfcomm_reparent_device(struct rfcomm_dev *dev)
{
struct hci_dev *hdev;
struct hci_conn *conn;
hdev = hci_get_route(&dev->dst, &dev->src);
if (!hdev)
- return NULL;
+ return;
+ /* The lookup results are unsafe to access without the
+ * hci device lock (FIXME: why is this not documented?)
+ */
+ hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &dev->dst);
- hci_dev_put(hdev);
+ /* Just because the ACL link is in the hash table doesn't
+ * guarantee that the sysfs device has been added ...
+ */
+ if (conn && device_is_registered(&conn->dev))
+ device_move(dev->tty_dev, &conn->dev, DPM_ORDER_DEV_AFTER_PARENT);
- return conn ? &conn->dev : NULL;
+ hci_dev_unlock(hdev);
+ hci_dev_put(hdev);
}
static ssize_t show_address(struct device *tty_dev, struct device_attribute *attr, char *buf)
{
struct rfcomm_dev *dev = dev_get_drvdata(tty_dev);
- return sprintf(buf, "%s\n", batostr(&dev->dst));
+ return sprintf(buf, "%pMR\n", &dev->dst);
}
static ssize_t show_channel(struct device *tty_dev, struct device_attribute *attr, char *buf)
@@ -195,36 +213,33 @@ static ssize_t show_channel(struct device *tty_dev, struct device_attribute *att
static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
static DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
-static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
+static struct rfcomm_dev *__rfcomm_dev_add(struct rfcomm_dev_req *req,
+ struct rfcomm_dlc *dlc)
{
- struct rfcomm_dev *dev;
- struct list_head *head = &rfcomm_dev_list, *p;
+ struct rfcomm_dev *dev, *entry;
+ struct list_head *head = &rfcomm_dev_list;
int err = 0;
- BT_DBG("id %d channel %d", req->dev_id, req->channel);
-
dev = kzalloc(sizeof(struct rfcomm_dev), GFP_KERNEL);
if (!dev)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
- write_lock_bh(&rfcomm_dev_lock);
+ mutex_lock(&rfcomm_dev_lock);
if (req->dev_id < 0) {
dev->id = 0;
- list_for_each(p, &rfcomm_dev_list) {
- if (list_entry(p, struct rfcomm_dev, list)->id != dev->id)
+ list_for_each_entry(entry, &rfcomm_dev_list, list) {
+ if (entry->id != dev->id)
break;
dev->id++;
- head = p;
+ head = &entry->list;
}
} else {
dev->id = req->dev_id;
- list_for_each(p, &rfcomm_dev_list) {
- struct rfcomm_dev *entry = list_entry(p, struct rfcomm_dev, list);
-
+ list_for_each_entry(entry, &rfcomm_dev_list, list) {
if (entry->id == dev->id) {
err = -EADDRINUSE;
goto out;
@@ -233,7 +248,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
if (entry->id > dev->id - 1)
break;
- head = p;
+ head = &entry->list;
}
}
@@ -245,7 +260,6 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
sprintf(dev->name, "rfcomm%d", dev->id);
list_add(&dev->list, head);
- atomic_set(&dev->refcnt, 1);
bacpy(&dev->src, &req->src);
bacpy(&dev->dst, &req->dst);
@@ -254,10 +268,8 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
dev->flags = req->flags &
((1 << RFCOMM_RELEASE_ONHUP) | (1 << RFCOMM_REUSE_DLC));
- atomic_set(&dev->opened, 0);
-
- init_waitqueue_head(&dev->wait);
- tasklet_init(&dev->wakeup_task, rfcomm_tty_wakeup, (unsigned long) dev);
+ tty_port_init(&dev->port);
+ dev->port.ops = &rfcomm_port_ops;
skb_queue_head_init(&dev->pending);
@@ -293,20 +305,37 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
holds reference to this module. */
__module_get(THIS_MODULE);
+ mutex_unlock(&rfcomm_dev_lock);
+ return dev;
+
out:
- write_unlock_bh(&rfcomm_dev_lock);
+ mutex_unlock(&rfcomm_dev_lock);
+ kfree(dev);
+ return ERR_PTR(err);
+}
- if (err < 0)
- goto free;
+static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
+{
+ struct rfcomm_dev *dev;
+ struct device *tty;
+
+ BT_DBG("id %d channel %d", req->dev_id, req->channel);
- dev->tty_dev = tty_register_device(rfcomm_tty_driver, dev->id, NULL);
+ dev = __rfcomm_dev_add(req, dlc);
+ if (IS_ERR(dev)) {
+ rfcomm_dlc_put(dlc);
+ return PTR_ERR(dev);
+ }
- if (IS_ERR(dev->tty_dev)) {
- err = PTR_ERR(dev->tty_dev);
- list_del(&dev->list);
- goto free;
+ tty = tty_port_register_device(&dev->port, rfcomm_tty_driver,
+ dev->id, NULL);
+ if (IS_ERR(tty)) {
+ tty_port_put(&dev->port);
+ return PTR_ERR(tty);
}
+ dev->tty_dev = tty;
+ rfcomm_reparent_device(dev);
dev_set_drvdata(dev->tty_dev, dev);
if (device_create_file(dev->tty_dev, &dev_attr_address) < 0)
@@ -316,70 +345,49 @@ out:
BT_ERR("Failed to create channel attribute");
return dev->id;
-
-free:
- kfree(dev);
- return err;
}
-static void rfcomm_dev_del(struct rfcomm_dev *dev)
+/* ---- Send buffer ---- */
+static inline unsigned int rfcomm_room(struct rfcomm_dev *dev)
{
- BT_DBG("dev %p", dev);
-
- BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
-
- if (atomic_read(&dev->opened) > 0)
- return;
-
- write_lock_bh(&rfcomm_dev_lock);
- list_del_init(&dev->list);
- write_unlock_bh(&rfcomm_dev_lock);
+ struct rfcomm_dlc *dlc = dev->dlc;
- rfcomm_dev_put(dev);
-}
+ /* Limit the number of packets queued but not yet sent to 40 */
+ int pending = 40 - atomic_read(&dev->wmem_alloc);
-/* ---- Send buffer ---- */
-static inline unsigned int rfcomm_room(struct rfcomm_dlc *dlc)
-{
- /* We can't let it be zero, because we don't get a callback
- when tx_credits becomes nonzero, hence we'd never wake up */
- return dlc->mtu * (dlc->tx_credits?:1);
+ return max(0, pending) * dlc->mtu;
}
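
For example, with 12 packets still unsent and a DLC MTU of 1013 bytes,
rfcomm_room() now reports (40 - 12) * 1013 = 28364 bytes of room, and 0
once 40 or more packets are outstanding.
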
static void rfcomm_wfree(struct sk_buff *skb)
{
struct rfcomm_dev *dev = (void *) skb->sk;
- atomic_sub(skb->truesize, &dev->wmem_alloc);
+ atomic_dec(&dev->wmem_alloc);
if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags))
- tasklet_schedule(&dev->wakeup_task);
- rfcomm_dev_put(dev);
+ tty_port_tty_wakeup(&dev->port);
+ tty_port_put(&dev->port);
}
-static inline void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev)
+static void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev)
{
- rfcomm_dev_hold(dev);
- atomic_add(skb->truesize, &dev->wmem_alloc);
+ tty_port_get(&dev->port);
+ atomic_inc(&dev->wmem_alloc);
skb->sk = (void *) dev;
skb->destructor = rfcomm_wfree;
}
static struct sk_buff *rfcomm_wmalloc(struct rfcomm_dev *dev, unsigned long size, gfp_t priority)
{
- if (atomic_read(&dev->wmem_alloc) < rfcomm_room(dev->dlc)) {
- struct sk_buff *skb = alloc_skb(size, priority);
- if (skb) {
- rfcomm_set_owner_w(skb, dev);
- return skb;
- }
- }
- return NULL;
+ struct sk_buff *skb = alloc_skb(size, priority);
+ if (skb)
+ rfcomm_set_owner_w(skb, dev);
+ return skb;
}
/* ---- Device IOCTLs ---- */
#define NOCAP_FLAGS ((1 << RFCOMM_REUSE_DLC) | (1 << RFCOMM_RELEASE_ONHUP))
-static int rfcomm_create_dev(struct sock *sk, void __user *arg)
+static int __rfcomm_create_dev(struct sock *sk, void __user *arg)
{
struct rfcomm_dev_req req;
struct rfcomm_dlc *dlc;
@@ -401,16 +409,22 @@ static int rfcomm_create_dev(struct sock *sk, void __user *arg)
dlc = rfcomm_pi(sk)->dlc;
rfcomm_dlc_hold(dlc);
} else {
+ /* Validate the channel is unused */
+ dlc = rfcomm_dlc_exists(&req.src, &req.dst, req.channel);
+ if (IS_ERR(dlc))
+ return PTR_ERR(dlc);
+ else if (dlc) {
+ rfcomm_dlc_put(dlc);
+ return -EBUSY;
+ }
dlc = rfcomm_dlc_alloc(GFP_KERNEL);
if (!dlc)
return -ENOMEM;
}
id = rfcomm_dev_add(&req, dlc);
- if (id < 0) {
- rfcomm_dlc_put(dlc);
+ if (id < 0)
return id;
- }
if (req.flags & (1 << RFCOMM_REUSE_DLC)) {
/* DLC is now used by device.
@@ -421,10 +435,11 @@ static int rfcomm_create_dev(struct sock *sk, void __user *arg)
return id;
}
-static int rfcomm_release_dev(void __user *arg)
+static int __rfcomm_release_dev(void __user *arg)
{
struct rfcomm_dev_req req;
struct rfcomm_dev *dev;
+ struct tty_struct *tty;
if (copy_from_user(&req, arg, sizeof(req)))
return -EFAULT;
@@ -436,28 +451,60 @@ static int rfcomm_release_dev(void __user *arg)
return -ENODEV;
if (dev->flags != NOCAP_FLAGS && !capable(CAP_NET_ADMIN)) {
- rfcomm_dev_put(dev);
+ tty_port_put(&dev->port);
return -EPERM;
}
+ /* only release once */
+ if (test_and_set_bit(RFCOMM_DEV_RELEASED, &dev->status)) {
+ tty_port_put(&dev->port);
+ return -EALREADY;
+ }
+
if (req.flags & (1 << RFCOMM_HANGUP_NOW))
rfcomm_dlc_close(dev->dlc, 0);
/* Shut down TTY synchronously before freeing rfcomm_dev */
- if (dev->tty)
- tty_vhangup(dev->tty);
+ tty = tty_port_tty_get(&dev->port);
+ if (tty) {
+ tty_vhangup(tty);
+ tty_kref_put(tty);
+ }
+
+ if (!test_bit(RFCOMM_TTY_OWNED, &dev->status))
+ tty_port_put(&dev->port);
- if (!test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags))
- rfcomm_dev_del(dev);
- rfcomm_dev_put(dev);
+ tty_port_put(&dev->port);
return 0;
}
+static int rfcomm_create_dev(struct sock *sk, void __user *arg)
+{
+ int ret;
+
+ mutex_lock(&rfcomm_ioctl_mutex);
+ ret = __rfcomm_create_dev(sk, arg);
+ mutex_unlock(&rfcomm_ioctl_mutex);
+
+ return ret;
+}
+
+static int rfcomm_release_dev(void __user *arg)
+{
+ int ret;
+
+ mutex_lock(&rfcomm_ioctl_mutex);
+ ret = __rfcomm_release_dev(arg);
+ mutex_unlock(&rfcomm_ioctl_mutex);
+
+ return ret;
+}
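
A hypothetical userspace counterpart to the ioctl path serialized above
(assuming BlueZ's libbluetooth headers; ctl_sk is an already-open RFCOMM
socket, connected to dst if RFCOMM_REUSE_DLC is wanted):

#include <string.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/rfcomm.h>

/* Returns the allocated /dev/rfcommN id (>= 0), or -1 with errno set. */
int create_rfcomm_dev(int ctl_sk, const bdaddr_t *dst, uint8_t channel)
{
	struct rfcomm_dev_req req;

	memset(&req, 0, sizeof(req));
	req.dev_id = -1;		/* let the kernel choose an id */
	req.flags = (1 << RFCOMM_REUSE_DLC) | (1 << RFCOMM_RELEASE_ONHUP);
	bacpy(&req.src, BDADDR_ANY);
	bacpy(&req.dst, dst);
	req.channel = channel;

	return ioctl(ctl_sk, RFCOMMCREATEDEV, &req);
}
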
+
static int rfcomm_get_dev_list(void __user *arg)
{
+ struct rfcomm_dev *dev;
struct rfcomm_dev_list_req *dl;
struct rfcomm_dev_info *di;
- struct list_head *p;
int n = 0, size, err;
u16 dev_num;
@@ -471,17 +518,16 @@ static int rfcomm_get_dev_list(void __user *arg)
size = sizeof(*dl) + dev_num * sizeof(*di);
- dl = kmalloc(size, GFP_KERNEL);
+ dl = kzalloc(size, GFP_KERNEL);
if (!dl)
return -ENOMEM;
di = dl->dev_info;
- read_lock_bh(&rfcomm_dev_lock);
+ mutex_lock(&rfcomm_dev_lock);
- list_for_each(p, &rfcomm_dev_list) {
- struct rfcomm_dev *dev = list_entry(p, struct rfcomm_dev, list);
- if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags))
+ list_for_each_entry(dev, &rfcomm_dev_list, list) {
+ if (!tty_port_get(&dev->port))
continue;
(di + n)->id = dev->id;
(di + n)->flags = dev->flags;
@@ -489,11 +535,12 @@ static int rfcomm_get_dev_list(void __user *arg)
(di + n)->channel = dev->channel;
bacpy(&(di + n)->src, &dev->src);
bacpy(&(di + n)->dst, &dev->dst);
+ tty_port_put(&dev->port);
if (++n >= dev_num)
break;
}
- read_unlock_bh(&rfcomm_dev_lock);
+ mutex_unlock(&rfcomm_dev_lock);
dl->dev_num = n;
size = sizeof(*dl) + n * sizeof(*di);
@@ -528,7 +575,7 @@ static int rfcomm_get_dev_info(void __user *arg)
if (copy_to_user(arg, &di, sizeof(di)))
err = -EFAULT;
- rfcomm_dev_put(dev);
+ tty_port_put(&dev->port);
return err;
}
@@ -557,23 +604,21 @@ int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb)
{
struct rfcomm_dev *dev = dlc->owner;
- struct tty_struct *tty;
if (!dev) {
kfree_skb(skb);
return;
}
- tty = dev->tty;
- if (!tty || !skb_queue_empty(&dev->pending)) {
+ if (!skb_queue_empty(&dev->pending)) {
skb_queue_tail(&dev->pending, skb);
return;
}
- BT_DBG("dlc %p tty %p len %d", dlc, tty, skb->len);
+ BT_DBG("dlc %p len %d", dlc, skb->len);
- tty_insert_flip_string(tty, skb->data, skb->len);
- tty_flip_buffer_push(tty);
+ tty_insert_flip_string(&dev->port, skb->data, skb->len);
+ tty_flip_buffer_push(&dev->port);
kfree_skb(skb);
}
@@ -587,31 +632,12 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
BT_DBG("dlc %p dev %p err %d", dlc, dev, err);
dev->err = err;
- wake_up_interruptible(&dev->wait);
-
- if (dlc->state == BT_CLOSED) {
- if (!dev->tty) {
- if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) {
- /* Drop DLC lock here to avoid deadlock
- * 1. rfcomm_dev_get will take rfcomm_dev_lock
- * but in rfcomm_dev_add there's lock order:
- * rfcomm_dev_lock -> dlc lock
- * 2. rfcomm_dev_put will deadlock if it's
- * the last reference
- */
- rfcomm_dlc_unlock(dlc);
- if (rfcomm_dev_get(dev->id) == NULL) {
- rfcomm_dlc_lock(dlc);
- return;
- }
-
- rfcomm_dev_del(dev);
- rfcomm_dev_put(dev);
- rfcomm_dlc_lock(dlc);
- }
- } else
- tty_hangup(dev->tty);
- }
+ if (dlc->state == BT_CONNECTED) {
+ rfcomm_reparent_device(dev);
+
+ wake_up_interruptible(&dev->port.open_wait);
+ } else if (dlc->state == BT_CLOSED)
+ tty_port_tty_hangup(&dev->port, false);
}
static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig)
@@ -622,10 +648,8 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig)
BT_DBG("dlc %p dev %p v24_sig 0x%02x", dlc, dev, v24_sig);
- if ((dev->modem_status & TIOCM_CD) && !(v24_sig & RFCOMM_V24_DV)) {
- if (dev->tty && !C_CLOCAL(dev->tty))
- tty_hangup(dev->tty);
- }
+ if ((dev->modem_status & TIOCM_CD) && !(v24_sig & RFCOMM_V24_DV))
+ tty_port_tty_hangup(&dev->port, true);
dev->modem_status =
((v24_sig & RFCOMM_V24_RTC) ? (TIOCM_DSR | TIOCM_DTR) : 0) |
@@ -635,150 +659,125 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig)
}
/* ---- TTY functions ---- */
-static void rfcomm_tty_wakeup(unsigned long arg)
-{
- struct rfcomm_dev *dev = (void *) arg;
- struct tty_struct *tty = dev->tty;
- if (!tty)
- return;
-
- BT_DBG("dev %p tty %p", dev, tty);
- tty_wakeup(tty);
-}
-
static void rfcomm_tty_copy_pending(struct rfcomm_dev *dev)
{
- struct tty_struct *tty = dev->tty;
struct sk_buff *skb;
int inserted = 0;
- if (!tty)
- return;
-
- BT_DBG("dev %p tty %p", dev, tty);
+ BT_DBG("dev %p", dev);
rfcomm_dlc_lock(dev->dlc);
while ((skb = skb_dequeue(&dev->pending))) {
- inserted += tty_insert_flip_string(tty, skb->data, skb->len);
+ inserted += tty_insert_flip_string(&dev->port, skb->data,
+ skb->len);
kfree_skb(skb);
}
rfcomm_dlc_unlock(dev->dlc);
if (inserted > 0)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&dev->port);
}
-static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
+/* do the reverse of install, clearing the tty fields and releasing the
+ * reference to tty_port
+ */
+static void rfcomm_tty_cleanup(struct tty_struct *tty)
{
- DECLARE_WAITQUEUE(wait, current);
- struct rfcomm_dev *dev;
- struct rfcomm_dlc *dlc;
- int err, id;
+ struct rfcomm_dev *dev = tty->driver_data;
- id = tty->index;
+ clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags);
- BT_DBG("tty %p id %d", tty, id);
+ rfcomm_dlc_lock(dev->dlc);
+ tty->driver_data = NULL;
+ rfcomm_dlc_unlock(dev->dlc);
- /* We don't leak this refcount. For reasons which are not entirely
- clear, the TTY layer will call our ->close() method even if the
- open fails. We decrease the refcount there, and decreasing it
- here too would cause breakage. */
- dev = rfcomm_dev_get(id);
- if (!dev)
- return -ENODEV;
+ /*
+ * purge the dlc->tx_queue to avoid circular dependencies
+ * between dev and dlc
+ */
+ skb_queue_purge(&dev->dlc->tx_queue);
- BT_DBG("dev %p dst %s channel %d opened %d", dev, batostr(&dev->dst),
- dev->channel, atomic_read(&dev->opened));
+ tty_port_put(&dev->port);
+}
- if (atomic_inc_return(&dev->opened) > 1)
- return 0;
+/* We acquire the tty_port reference here, since setting the termios is
+ * where the tty is first used. We also populate the driver_data field
+ * and install the tty port.
+ */
+static int rfcomm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+ struct rfcomm_dev *dev;
+ struct rfcomm_dlc *dlc;
+ int err;
+
+ dev = rfcomm_dev_get(tty->index);
+ if (!dev)
+ return -ENODEV;
dlc = dev->dlc;
/* Attach TTY and open DLC */
-
rfcomm_dlc_lock(dlc);
tty->driver_data = dev;
- dev->tty = tty;
rfcomm_dlc_unlock(dlc);
set_bit(RFCOMM_TTY_ATTACHED, &dev->flags);
- err = rfcomm_dlc_open(dlc, &dev->src, &dev->dst, dev->channel);
- if (err < 0)
+ /* install the tty_port */
+ err = tty_port_install(&dev->port, driver, tty);
+ if (err) {
+ rfcomm_tty_cleanup(tty);
return err;
+ }
- /* Wait for DLC to connect */
- add_wait_queue(&dev->wait, &wait);
- while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
+ /* take over the tty_port reference if the port was created with the
+ * flag RFCOMM_RELEASE_ONHUP. This will force the release of the port
+ * when the last process closes the tty. This behaviour is expected
+ * by userspace.
+ */
+ if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) {
+ set_bit(RFCOMM_TTY_OWNED, &dev->status);
+ tty_port_put(&dev->port);
+ }
- if (dlc->state == BT_CLOSED) {
- err = -dev->err;
- break;
- }
+ return 0;
+}
- if (dlc->state == BT_CONNECTED)
- break;
+static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
+{
+ struct rfcomm_dev *dev = tty->driver_data;
+ int err;
- if (signal_pending(current)) {
- err = -EINTR;
- break;
- }
+ BT_DBG("tty %p id %d", tty, tty->index);
- tty_unlock();
- schedule();
- tty_lock();
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&dev->wait, &wait);
+ BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
+ dev->channel, dev->port.count);
- if (err == 0)
- device_move(dev->tty_dev, rfcomm_get_device(dev),
- DPM_ORDER_DEV_AFTER_PARENT);
+ err = tty_port_open(&dev->port, tty, filp);
+ if (err)
+ return err;
+ /*
+ * FIXME: rfcomm should use proper flow control for
+ * received data. This hack will be unnecessary and can
+ * be removed when that's implemented
+ */
rfcomm_tty_copy_pending(dev);
rfcomm_dlc_unthrottle(dev->dlc);
- return err;
+ return 0;
}
static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
{
struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
- if (!dev)
- return;
BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
- atomic_read(&dev->opened));
-
- if (atomic_dec_and_test(&dev->opened)) {
- if (dev->tty_dev->parent)
- device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
-
- /* Close DLC and detach TTY */
- rfcomm_dlc_close(dev->dlc, 0);
-
- clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags);
- tasklet_kill(&dev->wakeup_task);
+ dev->port.count);
- rfcomm_dlc_lock(dev->dlc);
- tty->driver_data = NULL;
- dev->tty = NULL;
- rfcomm_dlc_unlock(dev->dlc);
-
- if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) {
- write_lock_bh(&rfcomm_dev_lock);
- list_del_init(&dev->list);
- write_unlock_bh(&rfcomm_dev_lock);
-
- rfcomm_dev_put(dev);
- }
- }
-
- rfcomm_dev_put(dev);
+ tty_port_close(&dev->port, tty, filp);
}
static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
@@ -786,7 +785,7 @@ static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, in
struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
struct rfcomm_dlc *dlc = dev->dlc;
struct sk_buff *skb;
- int err = 0, sent = 0, size;
+ int sent = 0, size;
BT_DBG("tty %p count %d", tty, count);
@@ -794,7 +793,6 @@ static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, in
size = min_t(uint, count, dlc->mtu);
skb = rfcomm_wmalloc(dev, size + RFCOMM_SKB_RESERVE, GFP_ATOMIC);
-
if (!skb)
break;
@@ -802,37 +800,29 @@ static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, in
memcpy(skb_put(skb, size), buf + sent, size);
- err = rfcomm_dlc_send(dlc, skb);
- if (err < 0) {
- kfree_skb(skb);
- break;
- }
+ rfcomm_dlc_send_noerror(dlc, skb);
sent += size;
count -= size;
}
- return sent ? sent : err;
+ return sent;
}
static int rfcomm_tty_write_room(struct tty_struct *tty)
{
struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
- int room;
+ int room = 0;
- BT_DBG("tty %p", tty);
+ if (dev && dev->dlc)
+ room = rfcomm_room(dev);
- if (!dev || !dev->dlc)
- return 0;
-
- room = rfcomm_room(dev->dlc) - atomic_read(&dev->wmem_alloc);
- if (room < 0)
- room = 0;
+ BT_DBG("tty %p room %d", tty, room);
return room;
}
-static int rfcomm_tty_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd, unsigned long arg)
+static int rfcomm_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
{
BT_DBG("tty %p cmd 0x%02x", tty, cmd);
@@ -879,7 +869,7 @@ static int rfcomm_tty_ioctl(struct tty_struct *tty, struct file *filp, unsigned
static void rfcomm_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
{
- struct ktermios *new = tty->termios;
+ struct ktermios *new = &tty->termios;
int old_baud_rate = tty_termios_baud_rate(old);
int new_baud_rate = tty_termios_baud_rate(new);
@@ -1078,20 +1068,10 @@ static void rfcomm_tty_hangup(struct tty_struct *tty)
BT_DBG("tty %p dev %p", tty, dev);
- if (!dev)
- return;
-
- rfcomm_tty_flush_buffer(tty);
-
- if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) {
- if (rfcomm_dev_get(dev->id) == NULL)
- return;
- rfcomm_dev_del(dev);
- rfcomm_dev_put(dev);
- }
+ tty_port_hangup(&dev->port);
}
-static int rfcomm_tty_tiocmget(struct tty_struct *tty, struct file *filp)
+static int rfcomm_tty_tiocmget(struct tty_struct *tty)
{
struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
@@ -1100,7 +1080,7 @@ static int rfcomm_tty_tiocmget(struct tty_struct *tty, struct file *filp)
return dev->modem_status;
}
-static int rfcomm_tty_tiocmset(struct tty_struct *tty, struct file *filp, unsigned int set, unsigned int clear)
+static int rfcomm_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)
{
struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
struct rfcomm_dlc *dlc = dev->dlc;
@@ -1151,15 +1131,18 @@ static const struct tty_operations rfcomm_ops = {
.wait_until_sent = rfcomm_tty_wait_until_sent,
.tiocmget = rfcomm_tty_tiocmget,
.tiocmset = rfcomm_tty_tiocmset,
+ .install = rfcomm_tty_install,
+ .cleanup = rfcomm_tty_cleanup,
};
int __init rfcomm_init_ttys(void)
{
+ int error;
+
rfcomm_tty_driver = alloc_tty_driver(RFCOMM_TTY_PORTS);
if (!rfcomm_tty_driver)
- return -1;
+ return -ENOMEM;
- rfcomm_tty_driver->owner = THIS_MODULE;
rfcomm_tty_driver->driver_name = "rfcomm";
rfcomm_tty_driver->name = "rfcomm";
rfcomm_tty_driver->major = RFCOMM_TTY_MAJOR;
@@ -1168,14 +1151,15 @@ int __init rfcomm_init_ttys(void)
rfcomm_tty_driver->subtype = SERIAL_TYPE_NORMAL;
rfcomm_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
rfcomm_tty_driver->init_termios = tty_std_termios;
- rfcomm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ rfcomm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL;
rfcomm_tty_driver->init_termios.c_lflag &= ~ICANON;
tty_set_operations(rfcomm_tty_driver, &rfcomm_ops);
- if (tty_register_driver(rfcomm_tty_driver)) {
+ error = tty_register_driver(rfcomm_tty_driver);
+ if (error) {
BT_ERR("Can't register RFCOMM TTY driver");
put_tty_driver(rfcomm_tty_driver);
- return -1;
+ return error;
}
BT_INFO("RFCOMM TTY layer initialized");
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 960c6d1637d..c06dbd3938e 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -25,34 +25,14 @@
/* Bluetooth SCO sockets. */
#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/socket.h>
-#include <linux/skbuff.h>
-#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#include <linux/list.h>
-#include <net/sock.h>
-
-#include <asm/system.h>
-#include <linux/uaccess.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/sco.h>
-#define VERSION "0.6"
-
-static int disable_esco;
+static bool disable_esco;
static const struct proto_ops sco_sock_ops;
@@ -63,8 +43,6 @@ static struct bt_sock_list sco_sk_list = {
static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent);
static void sco_chan_del(struct sock *sk, int err);
-static int sco_conn_del(struct hci_conn *conn, int err);
-
static void sco_sock_close(struct sock *sk);
static void sco_sock_kill(struct sock *sk);
@@ -97,15 +75,15 @@ static void sco_sock_clear_timer(struct sock *sk)
}
/* ---- SCO connections ---- */
-static struct sco_conn *sco_conn_add(struct hci_conn *hcon, __u8 status)
+static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
{
struct hci_dev *hdev = hcon->hdev;
struct sco_conn *conn = hcon->sco_data;
- if (conn || status)
+ if (conn)
return conn;
- conn = kzalloc(sizeof(struct sco_conn), GFP_ATOMIC);
+ conn = kzalloc(sizeof(struct sco_conn), GFP_KERNEL);
if (!conn)
return NULL;
@@ -114,9 +92,6 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon, __u8 status)
hcon->sco_data = conn;
conn->hcon = hcon;
- conn->src = &hdev->bdaddr;
- conn->dst = &hcon->dst;
-
if (hdev->sco_mtu > 0)
conn->mtu = hdev->sco_mtu;
else
@@ -127,7 +102,7 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon, __u8 status)
return conn;
}
-static inline struct sock *sco_chan_get(struct sco_conn *conn)
+static struct sock *sco_chan_get(struct sco_conn *conn)
{
struct sock *sk = NULL;
sco_conn_lock(conn);
@@ -161,7 +136,8 @@ static int sco_conn_del(struct hci_conn *hcon, int err)
return 0;
}
-static inline int sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent)
+static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
+ struct sock *parent)
{
int err = 0;
@@ -177,40 +153,46 @@ static inline int sco_chan_add(struct sco_conn *conn, struct sock *sk, struct so
static int sco_connect(struct sock *sk)
{
- bdaddr_t *src = &bt_sk(sk)->src;
- bdaddr_t *dst = &bt_sk(sk)->dst;
struct sco_conn *conn;
struct hci_conn *hcon;
struct hci_dev *hdev;
int err, type;
- BT_DBG("%s -> %s", batostr(src), batostr(dst));
+ BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst);
- hdev = hci_get_route(dst, src);
+ hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src);
if (!hdev)
return -EHOSTUNREACH;
- hci_dev_lock_bh(hdev);
-
- err = -ENOMEM;
+ hci_dev_lock(hdev);
if (lmp_esco_capable(hdev) && !disable_esco)
type = ESCO_LINK;
else
type = SCO_LINK;
- hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
- if (!hcon)
+ if (sco_pi(sk)->setting == BT_VOICE_TRANSPARENT &&
+ (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev))) {
+ err = -EOPNOTSUPP;
goto done;
+ }
- conn = sco_conn_add(hcon, 0);
+ hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst,
+ sco_pi(sk)->setting);
+ if (IS_ERR(hcon)) {
+ err = PTR_ERR(hcon);
+ goto done;
+ }
+
+ conn = sco_conn_add(hcon);
if (!conn) {
- hci_conn_put(hcon);
+ hci_conn_drop(hcon);
+ err = -ENOMEM;
goto done;
}
/* Update source addr of the socket */
- bacpy(src, conn->src);
+ bacpy(&sco_pi(sk)->src, &hcon->src);
err = sco_chan_add(conn, sk, NULL);
if (err)
@@ -225,16 +207,16 @@ static int sco_connect(struct sock *sk)
}
done:
- hci_dev_unlock_bh(hdev);
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
}
-static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
+static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
{
struct sco_conn *conn = sco_pi(sk)->conn;
struct sk_buff *skb;
- int err, count;
+ int err;
/* Check outgoing MTU */
if (len > conn->mtu)
@@ -242,23 +224,21 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
BT_DBG("sk %p len %d", sk, len);
- count = min_t(unsigned int, conn->mtu, len);
- skb = bt_skb_send_alloc(sk, count,
- msg->msg_flags & MSG_DONTWAIT, &err);
+ skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
return err;
- if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
+ if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
kfree_skb(skb);
return -EFAULT;
}
hci_send_sco(conn->hcon, skb);
- return count;
+ return len;
}
-static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
+static void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
{
struct sock *sk = sco_chan_get(conn);
@@ -278,17 +258,19 @@ drop:
}
/* -------- Socket interface ---------- */
-static struct sock *__sco_get_sock_by_addr(bdaddr_t *ba)
+static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba)
{
struct sock *sk;
- struct hlist_node *node;
- sk_for_each(sk, node, &sco_sk_list.head)
- if (!bacmp(&bt_sk(sk)->src, ba))
- goto found;
- sk = NULL;
-found:
- return sk;
+ sk_for_each(sk, &sco_sk_list.head) {
+ if (sk->sk_state != BT_LISTEN)
+ continue;
+
+ if (!bacmp(&sco_pi(sk)->src, ba))
+ return sk;
+ }
+
+ return NULL;
}
/* Find socket listening on source bdaddr.
@@ -297,26 +279,25 @@ found:
static struct sock *sco_get_sock_listen(bdaddr_t *src)
{
struct sock *sk = NULL, *sk1 = NULL;
- struct hlist_node *node;
read_lock(&sco_sk_list.lock);
- sk_for_each(sk, node, &sco_sk_list.head) {
+ sk_for_each(sk, &sco_sk_list.head) {
if (sk->sk_state != BT_LISTEN)
continue;
/* Exact match. */
- if (!bacmp(&bt_sk(sk)->src, src))
+ if (!bacmp(&sco_pi(sk)->src, src))
break;
/* Closest match */
- if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
+ if (!bacmp(&sco_pi(sk)->src, BDADDR_ANY))
sk1 = sk;
}
read_unlock(&sco_sk_list.lock);
- return node ? sk : sk1;
+ return sk ? sk : sk1;
}
static void sco_sock_destruct(struct sock *sk)
@@ -370,6 +351,16 @@ static void __sco_sock_close(struct sock *sk)
case BT_CONNECTED:
case BT_CONFIG:
+ if (sco_pi(sk)->conn->hcon) {
+ sk->sk_state = BT_DISCONN;
+ sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
+ hci_conn_drop(sco_pi(sk)->conn->hcon);
+ sco_pi(sk)->conn->hcon = NULL;
+ } else
+ sco_chan_del(sk, ECONNRESET);
+ break;
+
+ case BT_CONNECT2:
case BT_CONNECT:
case BT_DISCONN:
sco_chan_del(sk, ECONNRESET);
@@ -395,8 +386,11 @@ static void sco_sock_init(struct sock *sk, struct sock *parent)
{
BT_DBG("sk %p", sk);
- if (parent)
+ if (parent) {
sk->sk_type = parent->sk_type;
+ bt_sk(sk)->flags = bt_sk(parent)->flags;
+ security_sk_clone(parent, sk);
+ }
}
static struct proto sco_proto = {
@@ -424,6 +418,8 @@ static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int pro
sk->sk_protocol = proto;
sk->sk_state = BT_OPEN;
+ sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT;
+
setup_timer(&sk->sk_timer, sco_sock_timeout, (unsigned long)sk);
bt_sock_link(&sco_sk_list, sk);
@@ -456,10 +452,9 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
- bdaddr_t *src = &sa->sco_bdaddr;
int err = 0;
- BT_DBG("sk %p %s", sk, batostr(&sa->sco_bdaddr));
+ BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr);
if (!addr || addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
@@ -471,17 +466,14 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
goto done;
}
- write_lock_bh(&sco_sk_list.lock);
-
- if (bacmp(src, BDADDR_ANY) && __sco_get_sock_by_addr(src)) {
- err = -EADDRINUSE;
- } else {
- /* Save source address */
- bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
- sk->sk_state = BT_BOUND;
+ if (sk->sk_type != SOCK_SEQPACKET) {
+ err = -EINVAL;
+ goto done;
}
- write_unlock_bh(&sco_sk_list.lock);
+ bacpy(&sco_pi(sk)->src, &sa->sco_bdaddr);
+
+ sk->sk_state = BT_BOUND;
done:
release_sock(sk);
@@ -492,8 +484,7 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
- int err = 0;
-
+ int err;
BT_DBG("sk %p", sk);
@@ -510,14 +501,14 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
lock_sock(sk);
/* Set destination address and psm */
- bacpy(&bt_sk(sk)->dst, &sa->sco_bdaddr);
+ bacpy(&sco_pi(sk)->dst, &sa->sco_bdaddr);
err = sco_connect(sk);
if (err)
goto done;
err = bt_sock_wait_state(sk, BT_CONNECTED,
- sock_sndtimeo(sk, flags & O_NONBLOCK));
+ sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
release_sock(sk);
@@ -527,21 +518,38 @@ done:
static int sco_sock_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
+ bdaddr_t *src = &sco_pi(sk)->src;
int err = 0;
BT_DBG("sk %p backlog %d", sk, backlog);
lock_sock(sk);
- if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
+ if (sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}
+ if (sk->sk_type != SOCK_SEQPACKET) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ write_lock(&sco_sk_list.lock);
+
+ if (__sco_get_sock_listen_by_addr(src)) {
+ err = -EADDRINUSE;
+ goto unlock;
+ }
+
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
+
sk->sk_state = BT_LISTEN;
+unlock:
+ write_unlock(&sco_sk_list.lock);
+
done:
release_sock(sk);
return err;
@@ -556,30 +564,26 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
lock_sock(sk);
- if (sk->sk_state != BT_LISTEN) {
- err = -EBADFD;
- goto done;
- }
-
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
- while (!(ch = bt_accept_dequeue(sk, newsock))) {
+ while (1) {
set_current_state(TASK_INTERRUPTIBLE);
- if (!timeo) {
- err = -EAGAIN;
+
+ if (sk->sk_state != BT_LISTEN) {
+ err = -EBADFD;
break;
}
- release_sock(sk);
- timeo = schedule_timeout(timeo);
- lock_sock(sk);
+ ch = bt_accept_dequeue(sk, newsock);
+ if (ch)
+ break;
- if (sk->sk_state != BT_LISTEN) {
- err = -EBADFD;
+ if (!timeo) {
+ err = -EAGAIN;
break;
}
@@ -587,8 +591,12 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
err = sock_intr_errno(timeo);
break;
}
+
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock(sk);
}
- set_current_state(TASK_RUNNING);
+ __set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
if (err)
@@ -614,9 +622,9 @@ static int sco_sock_getname(struct socket *sock, struct sockaddr *addr, int *len
*len = sizeof(struct sockaddr_sco);
if (peer)
- bacpy(&sa->sco_bdaddr, &bt_sk(sk)->dst);
+ bacpy(&sa->sco_bdaddr, &sco_pi(sk)->dst);
else
- bacpy(&sa->sco_bdaddr, &bt_sk(sk)->src);
+ bacpy(&sa->sco_bdaddr, &sco_pi(sk)->src);
return 0;
}
@@ -647,16 +655,127 @@ static int sco_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
return err;
}
+static void sco_conn_defer_accept(struct hci_conn *conn, u16 setting)
+{
+ struct hci_dev *hdev = conn->hdev;
+
+ BT_DBG("conn %p", conn);
+
+ conn->state = BT_CONFIG;
+
+ if (!lmp_esco_capable(hdev)) {
+ struct hci_cp_accept_conn_req cp;
+
+ bacpy(&cp.bdaddr, &conn->dst);
+ cp.role = 0x00; /* Ignored */
+
+ hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
+ } else {
+ struct hci_cp_accept_sync_conn_req cp;
+
+ bacpy(&cp.bdaddr, &conn->dst);
+ cp.pkt_type = cpu_to_le16(conn->pkt_type);
+
+ cp.tx_bandwidth = cpu_to_le32(0x00001f40);
+ cp.rx_bandwidth = cpu_to_le32(0x00001f40);
+ cp.content_format = cpu_to_le16(setting);
+
+ switch (setting & SCO_AIRMODE_MASK) {
+ case SCO_AIRMODE_TRANSP:
+ if (conn->pkt_type & ESCO_2EV3)
+ cp.max_latency = cpu_to_le16(0x0008);
+ else
+ cp.max_latency = cpu_to_le16(0x000D);
+ cp.retrans_effort = 0x02;
+ break;
+ case SCO_AIRMODE_CVSD:
+ cp.max_latency = cpu_to_le16(0xffff);
+ cp.retrans_effort = 0xff;
+ break;
+ }
+
+ hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
+ sizeof(cp), &cp);
+ }
+}
+
+static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t len, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct sco_pinfo *pi = sco_pi(sk);
+
+ lock_sock(sk);
+
+ if (sk->sk_state == BT_CONNECT2 &&
+ test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
+ sco_conn_defer_accept(pi->conn->hcon, pi->setting);
+ sk->sk_state = BT_CONFIG;
+
+ release_sock(sk);
+ return 0;
+ }
+
+ release_sock(sk);
+
+ return bt_sock_recvmsg(iocb, sock, msg, len, flags);
+}
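
A hypothetical userspace view of the deferred-setup flow above (assuming
BlueZ's libbluetooth headers): with BT_DEFER_SETUP set on the listening
socket, accept() returns a socket still in BT_CONNECT2, and the first
read() on it is what triggers sco_conn_defer_accept().

#include <unistd.h>
#include <stdint.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>

int sco_accept_deferred(int listen_sk)
{
	uint32_t defer = 1;
	char c;
	int sk;

	if (setsockopt(listen_sk, SOL_BLUETOOTH, BT_DEFER_SETUP,
		       &defer, sizeof(defer)) < 0)
		return -1;

	/* The SCO link is not yet accepted on the air at this point. */
	sk = accept(listen_sk, NULL, NULL);
	if (sk < 0)
		return -1;

	/* ... authorize the peer here, then confirm the connection: */
	if (read(sk, &c, 1) < 0) {
		close(sk);
		return -1;
	}

	return sk;
}
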
+
static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
- int err = 0;
+ int len, err = 0;
+ struct bt_voice voice;
+ u32 opt;
BT_DBG("sk %p", sk);
lock_sock(sk);
switch (optname) {
+
+ case BT_DEFER_SETUP:
+ if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
+ err = -EINVAL;
+ break;
+ }
+
+ if (get_user(opt, (u32 __user *) optval)) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (opt)
+ set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+ else
+ clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+ break;
+
+ case BT_VOICE:
+ if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND &&
+ sk->sk_state != BT_CONNECT2) {
+ err = -EINVAL;
+ break;
+ }
+
+ voice.setting = sco_pi(sk)->setting;
+
+ len = min_t(unsigned int, sizeof(voice), optlen);
+ if (copy_from_user((char *) &voice, optval, len)) {
+ err = -EFAULT;
+ break;
+ }
+
+ /* Explicitly check for these values */
+ if (voice.setting != BT_VOICE_TRANSPARENT &&
+ voice.setting != BT_VOICE_CVSD_16BIT) {
+ err = -EINVAL;
+ break;
+ }
+
+ sco_pi(sk)->setting = voice.setting;
+ break;
+
default:
err = -ENOPROTOOPT;
break;
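
A hypothetical userspace sketch for the new BT_VOICE option (assuming
BlueZ headers that define struct bt_voice): requesting transparent air
mode, e.g. for wide-band speech, before connect(); as the explicit check
above shows, anything other than the two supported settings is rejected
with -EINVAL.

#include <sys/socket.h>
#include <bluetooth/bluetooth.h>

int sco_request_transparent(int sk)
{
	struct bt_voice voice;

	voice.setting = BT_VOICE_TRANSPARENT;
	return setsockopt(sk, SOL_BLUETOOTH, BT_VOICE,
			  &voice, sizeof(voice));
}
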
@@ -682,7 +801,9 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user
switch (optname) {
case SCO_OPTIONS:
- if (sk->sk_state != BT_CONNECTED) {
+ if (sk->sk_state != BT_CONNECTED &&
+ !(sk->sk_state == BT_CONNECT2 &&
+ test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) {
err = -ENOTCONN;
break;
}
@@ -698,11 +819,14 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user
break;
case SCO_CONNINFO:
- if (sk->sk_state != BT_CONNECTED) {
+ if (sk->sk_state != BT_CONNECTED &&
+ !(sk->sk_state == BT_CONNECT2 &&
+ test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) {
err = -ENOTCONN;
break;
}
+ memset(&cinfo, 0, sizeof(cinfo));
cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
@@ -725,6 +849,7 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char
{
struct sock *sk = sock->sk;
int len, err = 0;
+ struct bt_voice voice;
BT_DBG("sk %p", sk);
@@ -737,6 +862,28 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char
lock_sock(sk);
switch (optname) {
+
+ case BT_DEFER_SETUP:
+ if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
+ err = -EINVAL;
+ break;
+ }
+
+ if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
+ (u32 __user *) optval))
+ err = -EFAULT;
+
+ break;
+
+ case BT_VOICE:
+ voice.setting = sco_pi(sk)->setting;
+
+ len = min_t(unsigned int, len, sizeof(voice));
+ if (copy_to_user(optval, (char *)&voice, len))
+ err = -EFAULT;
+
+ break;
+
default:
err = -ENOPROTOOPT;
break;
@@ -764,7 +911,7 @@ static int sco_sock_shutdown(struct socket *sock, int how)
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
err = bt_sock_wait_state(sk, BT_CLOSED,
- sk->sk_lingertime);
+ sk->sk_lingertime);
}
release_sock(sk);
return err;
@@ -819,7 +966,9 @@ static void sco_chan_del(struct sock *sk, int err)
conn->sk = NULL;
sco_pi(sk)->conn = NULL;
sco_conn_unlock(conn);
- hci_conn_put(conn->hcon);
+
+ if (conn->hcon)
+ hci_conn_drop(conn->hcon);
}
sk->sk_state = BT_CLOSED;
@@ -836,8 +985,6 @@ static void sco_conn_ready(struct sco_conn *conn)
BT_DBG("conn %p", conn);
- sco_conn_lock(conn);
-
if (sk) {
sco_sock_clear_timer(sk);
bh_lock_sock(sk);
@@ -845,60 +992,66 @@ static void sco_conn_ready(struct sco_conn *conn)
sk->sk_state_change(sk);
bh_unlock_sock(sk);
} else {
- parent = sco_get_sock_listen(conn->src);
- if (!parent)
- goto done;
+ sco_conn_lock(conn);
+
+ parent = sco_get_sock_listen(&conn->hcon->src);
+ if (!parent) {
+ sco_conn_unlock(conn);
+ return;
+ }
bh_lock_sock(parent);
sk = sco_sock_alloc(sock_net(parent), NULL,
- BTPROTO_SCO, GFP_ATOMIC);
+ BTPROTO_SCO, GFP_ATOMIC);
if (!sk) {
bh_unlock_sock(parent);
- goto done;
+ sco_conn_unlock(conn);
+ return;
}
sco_sock_init(sk, parent);
- bacpy(&bt_sk(sk)->src, conn->src);
- bacpy(&bt_sk(sk)->dst, conn->dst);
+ bacpy(&sco_pi(sk)->src, &conn->hcon->src);
+ bacpy(&sco_pi(sk)->dst, &conn->hcon->dst);
hci_conn_hold(conn->hcon);
__sco_chan_add(conn, sk, parent);
- sk->sk_state = BT_CONNECTED;
+ if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
+ sk->sk_state = BT_CONNECT2;
+ else
+ sk->sk_state = BT_CONNECTED;
/* Wake up parent */
- parent->sk_data_ready(parent, 1);
+ parent->sk_data_ready(parent);
bh_unlock_sock(parent);
- }
-done:
- sco_conn_unlock(conn);
+ sco_conn_unlock(conn);
+ }
}
/* ----- SCO interface with lower layer (HCI) ----- */
-static int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
+int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
{
- register struct sock *sk;
- struct hlist_node *node;
+ struct sock *sk;
int lm = 0;
- if (type != SCO_LINK && type != ESCO_LINK)
- return -EINVAL;
-
- BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
+ BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
/* Find listening sockets */
read_lock(&sco_sk_list.lock);
- sk_for_each(sk, node, &sco_sk_list.head) {
+ sk_for_each(sk, &sco_sk_list.head) {
if (sk->sk_state != BT_LISTEN)
continue;
- if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) ||
- !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
+ if (!bacmp(&sco_pi(sk)->src, &hdev->bdaddr) ||
+ !bacmp(&sco_pi(sk)->src, BDADDR_ANY)) {
lm |= HCI_LM_ACCEPT;
+
+ if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
+ *flags |= HCI_PROTO_DEFER;
break;
}
}
@@ -907,38 +1060,27 @@ static int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
return lm;
}
-static int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
+void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
{
- BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
-
- if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
- return -EINVAL;
-
+ BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
if (!status) {
struct sco_conn *conn;
- conn = sco_conn_add(hcon, status);
+ conn = sco_conn_add(hcon);
if (conn)
sco_conn_ready(conn);
} else
- sco_conn_del(hcon, bt_err(status));
-
- return 0;
+ sco_conn_del(hcon, bt_to_errno(status));
}
-static int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
+void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
{
BT_DBG("hcon %p reason %d", hcon, reason);
- if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
- return -EINVAL;
-
- sco_conn_del(hcon, bt_err(reason));
-
- return 0;
+ sco_conn_del(hcon, bt_to_errno(reason));
}
-static int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
+int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
{
struct sco_conn *conn = hcon->sco_data;
@@ -960,16 +1102,15 @@ drop:
static int sco_debugfs_show(struct seq_file *f, void *p)
{
struct sock *sk;
- struct hlist_node *node;
- read_lock_bh(&sco_sk_list.lock);
+ read_lock(&sco_sk_list.lock);
- sk_for_each(sk, node, &sco_sk_list.head) {
- seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src),
- batostr(&bt_sk(sk)->dst), sk->sk_state);
+ sk_for_each(sk, &sco_sk_list.head) {
+ seq_printf(f, "%pMR %pMR %d\n", &sco_pi(sk)->src,
+ &sco_pi(sk)->dst, sk->sk_state);
}
- read_unlock_bh(&sco_sk_list.lock);
+ read_unlock(&sco_sk_list.lock);
return 0;
}
@@ -998,7 +1139,7 @@ static const struct proto_ops sco_sock_ops = {
.accept = sco_sock_accept,
.getname = sco_sock_getname,
.sendmsg = sco_sock_sendmsg,
- .recvmsg = bt_sock_recvmsg,
+ .recvmsg = sco_sock_recvmsg,
.poll = bt_sock_poll,
.ioctl = bt_sock_ioctl,
.mmap = sock_no_mmap,
@@ -1014,16 +1155,7 @@ static const struct net_proto_family sco_sock_family_ops = {
.create = sco_sock_create,
};
-static struct hci_proto sco_hci_proto = {
- .name = "SCO",
- .id = HCI_PROTO_SCO,
- .connect_ind = sco_connect_ind,
- .connect_cfm = sco_connect_cfm,
- .disconn_cfm = sco_disconn_cfm,
- .recv_scodata = sco_recv_scodata
-};
-
-static int __init sco_init(void)
+int __init sco_init(void)
{
int err;
@@ -1037,23 +1169,21 @@ static int __init sco_init(void)
goto error;
}
- err = hci_register_proto(&sco_hci_proto);
+ err = bt_procfs_init(&init_net, "sco", &sco_sk_list, NULL);
if (err < 0) {
- BT_ERR("SCO protocol registration failed");
+ BT_ERR("Failed to create SCO proc file");
bt_sock_unregister(BTPROTO_SCO);
goto error;
}
- if (bt_debugfs) {
- sco_debugfs = debugfs_create_file("sco", 0444,
- bt_debugfs, NULL, &sco_debugfs_fops);
- if (!sco_debugfs)
- BT_ERR("Failed to create SCO debug file");
- }
-
- BT_INFO("SCO (Voice Link) ver %s", VERSION);
BT_INFO("SCO socket layer initialized");
+ if (IS_ERR_OR_NULL(bt_debugfs))
+ return 0;
+
+ sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs,
+ NULL, &sco_debugfs_fops);
+
return 0;
error:
@@ -1061,27 +1191,16 @@ error:
return err;
}
-static void __exit sco_exit(void)
+void __exit sco_exit(void)
{
- debugfs_remove(sco_debugfs);
+ bt_procfs_cleanup(&init_net, "sco");
- if (bt_sock_unregister(BTPROTO_SCO) < 0)
- BT_ERR("SCO socket unregistration failed");
+ debugfs_remove(sco_debugfs);
- if (hci_unregister_proto(&sco_hci_proto) < 0)
- BT_ERR("SCO protocol unregistration failed");
+ bt_sock_unregister(BTPROTO_SCO);
proto_unregister(&sco_proto);
}
-module_init(sco_init);
-module_exit(sco_exit);
-
module_param(disable_esco, bool, 0644);
MODULE_PARM_DESC(disable_esco, "Disable eSCO connection creation");
-
-MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
-MODULE_DESCRIPTION("Bluetooth SCO ver " VERSION);
-MODULE_VERSION(VERSION);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("bt-proto-2");
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
new file mode 100644
index 00000000000..e33a982161c
--- /dev/null
+++ b/net/bluetooth/smp.c
@@ -0,0 +1,1406 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <crypto/b128ops.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/l2cap.h>
+#include <net/bluetooth/mgmt.h>
+
+#include "smp.h"
+
+#define SMP_TIMEOUT msecs_to_jiffies(30000)
+
+#define AUTH_REQ_MASK 0x07
+
+#define SMP_FLAG_TK_VALID 1
+#define SMP_FLAG_CFM_PENDING 2
+#define SMP_FLAG_MITM_AUTH 3
+#define SMP_FLAG_COMPLETE 4
+#define SMP_FLAG_INITIATOR 5
+
+struct smp_chan {
+ struct l2cap_conn *conn;
+ u8 preq[7]; /* SMP Pairing Request */
+ u8 prsp[7]; /* SMP Pairing Response */
+ u8 prnd[16]; /* SMP Pairing Random (local) */
+ u8 rrnd[16]; /* SMP Pairing Random (remote) */
+ u8 pcnf[16]; /* SMP Pairing Confirm */
+ u8 tk[16]; /* SMP Temporary Key */
+ u8 enc_key_size;
+ u8 remote_key_dist;
+ bdaddr_t id_addr;
+ u8 id_addr_type;
+ u8 irk[16];
+ struct smp_csrk *csrk;
+ struct smp_csrk *slave_csrk;
+ struct smp_ltk *ltk;
+ struct smp_ltk *slave_ltk;
+ struct smp_irk *remote_irk;
+ unsigned long flags;
+};
+
+static inline void swap128(const u8 src[16], u8 dst[16])
+{
+ int i;
+ for (i = 0; i < 16; i++)
+ dst[15 - i] = src[i];
+}
+
+static inline void swap56(const u8 src[7], u8 dst[7])
+{
+ int i;
+ for (i = 0; i < 7; i++)
+ dst[6 - i] = src[i];
+}
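
[Editor's note: SMP transmits multi-octet values least-significant-octet first, while the crypto calls below operate on most-significant-octet-first buffers; swap128() and swap56() bridge the two orderings. A minimal standalone sketch of the same reversal (userspace demo, not kernel code):]

#include <stdio.h>

typedef unsigned char u8;

/* Same reversal as the kernel's swap128() above */
static void swap128(const u8 src[16], u8 dst[16])
{
	int i;

	for (i = 0; i < 16; i++)
		dst[15 - i] = src[i];
}

int main(void)
{
	u8 le[16] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		      0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
	u8 be[16];
	int i;

	swap128(le, be);

	for (i = 0; i < 16; i++)
		printf("%02x", be[i]);	/* prints 0f0e0d...0100 */
	printf("\n");

	return 0;
}
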
+
+static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
+{
+ struct blkcipher_desc desc;
+ struct scatterlist sg;
+ uint8_t tmp[16], data[16];
+ int err;
+
+ if (tfm == NULL) {
+ BT_ERR("tfm %p", tfm);
+ return -EINVAL;
+ }
+
+ desc.tfm = tfm;
+ desc.flags = 0;
+
+ /* The most significant octet of key corresponds to k[0] */
+ swap128(k, tmp);
+
+ err = crypto_blkcipher_setkey(tfm, tmp, 16);
+ if (err) {
+ BT_ERR("cipher setkey failed: %d", err);
+ return err;
+ }
+
+ /* Most significant octet of plaintextData corresponds to data[0] */
+ swap128(r, data);
+
+ sg_init_one(&sg, data, 16);
+
+ err = crypto_blkcipher_encrypt(&desc, &sg, &sg, 16);
+ if (err)
+ BT_ERR("Encrypt data error %d", err);
+
+ /* Most significant octet of encryptedData corresponds to data[0] */
+ swap128(data, r);
+
+ return err;
+}
+
+static int smp_ah(struct crypto_blkcipher *tfm, u8 irk[16], u8 r[3], u8 res[3])
+{
+ u8 _res[16];
+ int err;
+
+ /* r' = padding || r */
+ memcpy(_res, r, 3);
+ memset(_res + 3, 0, 13);
+
+ err = smp_e(tfm, irk, _res);
+ if (err) {
+ BT_ERR("Encrypt error");
+ return err;
+ }
+
+ /* The output of the random address function ah is:
+ * ah(h, r) = e(k, r') mod 2^24
+ * The output of the security function e is then truncated to 24 bits
+ * by taking the least significant 24 bits of the output of e as the
+ * result of ah.
+ */
+ memcpy(res, _res, 3);
+
+ return 0;
+}
+
+bool smp_irk_matches(struct crypto_blkcipher *tfm, u8 irk[16],
+ bdaddr_t *bdaddr)
+{
+ u8 hash[3];
+ int err;
+
+ BT_DBG("RPA %pMR IRK %*phN", bdaddr, 16, irk);
+
+ err = smp_ah(tfm, irk, &bdaddr->b[3], hash);
+ if (err)
+ return false;
+
+ return !memcmp(bdaddr->b, hash, 3);
+}
+
+int smp_generate_rpa(struct crypto_blkcipher *tfm, u8 irk[16], bdaddr_t *rpa)
+{
+ int err;
+
+ get_random_bytes(&rpa->b[3], 3);
+
+ rpa->b[5] &= 0x3f; /* Clear two most significant bits */
+ rpa->b[5] |= 0x40; /* Set second most significant bit */
+
+ err = smp_ah(tfm, irk, &rpa->b[3], rpa->b);
+ if (err < 0)
+ return err;
+
+ BT_DBG("RPA %pMR", rpa);
+
+ return 0;
+}
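
[Editor's note: a resolvable private address packs a 24-bit random part (prand, with its two top bits forced to 0b01) in b[3..5] and the 24-bit hash ah(IRK, prand) in b[0..2]; resolving recomputes the hash and compares, as smp_irk_matches() does. A hedged standalone sketch of that layout; the XOR "cipher" is a stand-in purely to keep the sketch self-contained, a real resolver must use AES-128 as smp_e() does, and the helper names here are illustrative:]

#include <stdio.h>
#include <string.h>

typedef unsigned char u8;

/* XOR stand-in for the security function e(k, r) */
static void e_stub(const u8 k[16], u8 r[16])
{
	int i;

	for (i = 0; i < 16; i++)
		r[i] ^= k[i];
}

/* ah(k, r) = e(k, padding || r) mod 2^24, as in smp_ah() above */
static void ah(const u8 irk[16], const u8 r[3], u8 res[3])
{
	u8 buf[16];

	memcpy(buf, r, 3);	/* r' = padding || r */
	memset(buf + 3, 0, 13);
	e_stub(irk, buf);
	memcpy(res, buf, 3);	/* truncate to the 24 LSBs */
}

int main(void)
{
	u8 irk[16] = { 0x42 };	/* rest zero */
	u8 rpa[6];
	u8 hash[3];

	/* generate: b[3..5] = prand with 0b01 in the two MSBs of b[5] */
	rpa[3] = 0x12;
	rpa[4] = 0x34;
	rpa[5] = (0x99 & 0x3f) | 0x40;
	ah(irk, &rpa[3], rpa);	/* b[0..2] = hash */

	/* resolve: recompute and compare */
	ah(irk, &rpa[3], hash);
	printf("match: %d\n", !memcmp(rpa, hash, 3));	/* prints 1 */

	return 0;
}
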
+
+static int smp_c1(struct crypto_blkcipher *tfm, u8 k[16], u8 r[16],
+ u8 preq[7], u8 pres[7], u8 _iat, bdaddr_t *ia,
+ u8 _rat, bdaddr_t *ra, u8 res[16])
+{
+ u8 p1[16], p2[16];
+ int err;
+
+ memset(p1, 0, 16);
+
+ /* p1 = pres || preq || _rat || _iat */
+ p1[0] = _iat;
+ p1[1] = _rat;
+ memcpy(p1 + 2, preq, 7);
+ memcpy(p1 + 9, pres, 7);
+
+ /* p2 = padding || ia || ra */
+ memcpy(p2, ra, 6);
+ memcpy(p2 + 6, ia, 6);
+ memset(p2 + 12, 0, 4);
+
+ /* res = r XOR p1 */
+ u128_xor((u128 *) res, (u128 *) r, (u128 *) p1);
+
+ /* res = e(k, res) */
+ err = smp_e(tfm, k, res);
+ if (err) {
+ BT_ERR("Encrypt data error");
+ return err;
+ }
+
+ /* res = res XOR p2 */
+ u128_xor((u128 *) res, (u128 *) res, (u128 *) p2);
+
+ /* res = e(k, res) */
+ err = smp_e(tfm, k, res);
+ if (err)
+ BT_ERR("Encrypt data error");
+
+ return err;
+}
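
[Editor's note: smp_c1() computes the confirm value as two chained AES passes, confirm = e(k, e(k, r XOR p1) XOR p2). A standalone sketch of just the p1/p2 packing it uses, mirroring the LSB-first layout above; build_p1_p2 is an illustrative helper name, not kernel API:]

#include <stdio.h>
#include <string.h>

typedef unsigned char u8;

/* Pack p1 and p2 as smp_c1() does (LSB-first in memory):
 * p1 = pres || preq || rat || iat, with iat in p1[0]
 * p2 = padding || ia || ra, with ra in p2[0]
 */
static void build_p1_p2(const u8 preq[7], const u8 pres[7],
			u8 iat, const u8 ia[6], u8 rat, const u8 ra[6],
			u8 p1[16], u8 p2[16])
{
	memset(p1, 0, 16);
	p1[0] = iat;
	p1[1] = rat;
	memcpy(p1 + 2, preq, 7);
	memcpy(p1 + 9, pres, 7);

	memcpy(p2, ra, 6);
	memcpy(p2 + 6, ia, 6);
	memset(p2 + 12, 0, 4);
}

int main(void)
{
	u8 preq[7] = { 0x01 }, pres[7] = { 0x02 };	/* opcodes only */
	u8 ia[6] = { 0 }, ra[6] = { 0 };
	u8 p1[16], p2[16];
	int i;

	build_p1_p2(preq, pres, 0x00, ia, 0x01, ra, p1, p2);

	for (i = 0; i < 16; i++)
		printf("%02x", p1[i]);
	printf("\n");

	return 0;
}
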
+
+static int smp_s1(struct crypto_blkcipher *tfm, u8 k[16], u8 r1[16],
+ u8 r2[16], u8 _r[16])
+{
+ int err;
+
+	/* Only the eight least significant octets of r1 and r2 are used */
+ memcpy(_r, r2, 8);
+ memcpy(_r + 8, r1, 8);
+
+ err = smp_e(tfm, k, _r);
+ if (err)
+ BT_ERR("Encrypt data error");
+
+ return err;
+}
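
[Editor's note: smp_s1() derives the STK input by concatenating the 64 least significant bits of each pairing random and running one pass of e(TK, ·); in the LSB-first memory convention that means r2's half lands first. The packing alone, as a sketch (s1_input is an illustrative name):]

#include <stdio.h>
#include <string.h>

typedef unsigned char u8;

/* r' = r1' || r2' using the 64 LSBs of each random; LSB-first in
 * memory, so r2[0..7] is copied first, then r1[0..7].
 */
static void s1_input(const u8 r1[16], const u8 r2[16], u8 out[16])
{
	memcpy(out, r2, 8);
	memcpy(out + 8, r1, 8);
}

int main(void)
{
	u8 r1[16] = { 0x11 }, r2[16] = { 0x22 }, out[16];

	s1_input(r1, r2, out);
	printf("%02x %02x\n", out[0], out[8]);	/* prints 22 11 */

	return 0;
}
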
+
+static struct sk_buff *smp_build_cmd(struct l2cap_conn *conn, u8 code,
+ u16 dlen, void *data)
+{
+ struct sk_buff *skb;
+ struct l2cap_hdr *lh;
+ int len;
+
+ len = L2CAP_HDR_SIZE + sizeof(code) + dlen;
+
+ if (len > conn->mtu)
+ return NULL;
+
+ skb = bt_skb_alloc(len, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+ lh->len = cpu_to_le16(sizeof(code) + dlen);
+ lh->cid = cpu_to_le16(L2CAP_CID_SMP);
+
+ memcpy(skb_put(skb, sizeof(code)), &code, sizeof(code));
+
+ memcpy(skb_put(skb, dlen), data, dlen);
+
+ return skb;
+}
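
[Editor's note: smp_build_cmd() emits a fixed-channel L2CAP frame: a 4-octet little-endian header carrying the payload length and CID 0x0006 (L2CAP_CID_SMP), then the one-octet SMP opcode and the command data. A standalone sketch of that wire layout using a Pairing Failed PDU as the payload:]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t code = 0x05;	/* SMP_CMD_PAIRING_FAIL */
	uint8_t reason = 0x08;	/* SMP_UNSPECIFIED */
	uint8_t pdu[6];
	uint16_t len = sizeof(code) + sizeof(reason);
	uint16_t cid = 0x0006;	/* L2CAP_CID_SMP */
	int i;

	pdu[0] = len & 0xff;	/* cpu_to_le16(len) */
	pdu[1] = len >> 8;
	pdu[2] = cid & 0xff;	/* cpu_to_le16(cid) */
	pdu[3] = cid >> 8;
	pdu[4] = code;
	pdu[5] = reason;

	for (i = 0; i < 6; i++)
		printf("%02x ", pdu[i]);
	printf("\n");	/* prints 02 00 06 00 05 08 */

	return 0;
}
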
+
+static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data)
+{
+ struct sk_buff *skb = smp_build_cmd(conn, code, len, data);
+
+ BT_DBG("code 0x%2.2x", code);
+
+ if (!skb)
+ return;
+
+ skb->priority = HCI_PRIO_MAX;
+ hci_send_acl(conn->hchan, skb, 0);
+
+ cancel_delayed_work_sync(&conn->security_timer);
+ schedule_delayed_work(&conn->security_timer, SMP_TIMEOUT);
+}
+
+static __u8 authreq_to_seclevel(__u8 authreq)
+{
+ if (authreq & SMP_AUTH_MITM)
+ return BT_SECURITY_HIGH;
+ else
+ return BT_SECURITY_MEDIUM;
+}
+
+static __u8 seclevel_to_authreq(__u8 sec_level)
+{
+ switch (sec_level) {
+ case BT_SECURITY_HIGH:
+ return SMP_AUTH_MITM | SMP_AUTH_BONDING;
+ case BT_SECURITY_MEDIUM:
+ return SMP_AUTH_BONDING;
+ default:
+ return SMP_AUTH_NONE;
+ }
+}
+
+static void build_pairing_cmd(struct l2cap_conn *conn,
+ struct smp_cmd_pairing *req,
+ struct smp_cmd_pairing *rsp, __u8 authreq)
+{
+ struct smp_chan *smp = conn->smp_chan;
+ struct hci_conn *hcon = conn->hcon;
+ struct hci_dev *hdev = hcon->hdev;
+ u8 local_dist = 0, remote_dist = 0;
+
+ if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->dev_flags)) {
+ local_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;
+ remote_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;
+ authreq |= SMP_AUTH_BONDING;
+ } else {
+ authreq &= ~SMP_AUTH_BONDING;
+ }
+
+ if (test_bit(HCI_RPA_RESOLVING, &hdev->dev_flags))
+ remote_dist |= SMP_DIST_ID_KEY;
+
+ if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
+ local_dist |= SMP_DIST_ID_KEY;
+
+ if (rsp == NULL) {
+ req->io_capability = conn->hcon->io_capability;
+ req->oob_flag = SMP_OOB_NOT_PRESENT;
+ req->max_key_size = SMP_MAX_ENC_KEY_SIZE;
+ req->init_key_dist = local_dist;
+ req->resp_key_dist = remote_dist;
+ req->auth_req = (authreq & AUTH_REQ_MASK);
+
+ smp->remote_key_dist = remote_dist;
+ return;
+ }
+
+ rsp->io_capability = conn->hcon->io_capability;
+ rsp->oob_flag = SMP_OOB_NOT_PRESENT;
+ rsp->max_key_size = SMP_MAX_ENC_KEY_SIZE;
+ rsp->init_key_dist = req->init_key_dist & remote_dist;
+ rsp->resp_key_dist = req->resp_key_dist & local_dist;
+ rsp->auth_req = (authreq & AUTH_REQ_MASK);
+
+ smp->remote_key_dist = rsp->init_key_dist;
+}
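
[Editor's note: on the responder side, build_pairing_cmd() intersects the initiator's offered key-distribution bits with what the local side is willing to distribute; the resulting init_key_dist also becomes smp->remote_key_dist, the set of key PDUs still expected from the remote. The masking in miniature, with example bit values chosen for illustration:]

#include <stdio.h>

#define SMP_DIST_ENC_KEY	0x01
#define SMP_DIST_ID_KEY		0x02
#define SMP_DIST_SIGN		0x04

int main(void)
{
	unsigned char req_init = SMP_DIST_ENC_KEY | SMP_DIST_ID_KEY | SMP_DIST_SIGN;
	unsigned char req_resp = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;
	unsigned char local_dist  = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;
	unsigned char remote_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;

	/* rsp->init_key_dist / rsp->resp_key_dist as computed above */
	unsigned char rsp_init = req_init & remote_dist;
	unsigned char rsp_resp = req_resp & local_dist;

	printf("init 0x%02x resp 0x%02x\n", rsp_init, rsp_resp);	/* 0x05 0x05 */

	return 0;
}
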
+
+static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
+{
+ struct smp_chan *smp = conn->smp_chan;
+
+ if ((max_key_size > SMP_MAX_ENC_KEY_SIZE) ||
+ (max_key_size < SMP_MIN_ENC_KEY_SIZE))
+ return SMP_ENC_KEY_SIZE;
+
+ smp->enc_key_size = max_key_size;
+
+ return 0;
+}
+
+static void smp_failure(struct l2cap_conn *conn, u8 reason)
+{
+ struct hci_conn *hcon = conn->hcon;
+
+ if (reason)
+ smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
+ &reason);
+
+ clear_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags);
+ mgmt_auth_failed(hcon->hdev, &hcon->dst, hcon->type, hcon->dst_type,
+ HCI_ERROR_AUTH_FAILURE);
+
+ cancel_delayed_work_sync(&conn->security_timer);
+
+ if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
+ smp_chan_destroy(conn);
+}
+
+#define JUST_WORKS 0x00
+#define JUST_CFM 0x01
+#define REQ_PASSKEY 0x02
+#define CFM_PASSKEY 0x03
+#define REQ_OOB 0x04
+#define OVERLAP 0xFF
+
+static const u8 gen_method[5][5] = {
+ { JUST_WORKS, JUST_CFM, REQ_PASSKEY, JUST_WORKS, REQ_PASSKEY },
+ { JUST_WORKS, JUST_CFM, REQ_PASSKEY, JUST_WORKS, REQ_PASSKEY },
+ { CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, CFM_PASSKEY },
+ { JUST_WORKS, JUST_CFM, JUST_WORKS, JUST_WORKS, JUST_CFM },
+ { CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP },
+};
+
+static u8 get_auth_method(struct smp_chan *smp, u8 local_io, u8 remote_io)
+{
+ /* If either side has unknown io_caps, use JUST WORKS */
+ if (local_io > SMP_IO_KEYBOARD_DISPLAY ||
+ remote_io > SMP_IO_KEYBOARD_DISPLAY)
+ return JUST_WORKS;
+
+ return gen_method[remote_io][local_io];
+}
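
[Editor's note: the gen_method matrix encodes the Core Specification's IO-capability mapping for pairing-method selection: the remote IO capability picks the row and the local one the column. A standalone lookup sketch replicating the table above:]

#include <stdio.h>

#define JUST_WORKS	0x00
#define JUST_CFM	0x01
#define REQ_PASSKEY	0x02
#define CFM_PASSKEY	0x03
#define OVERLAP		0xFF

/* Rows: remote IO capability; columns: local IO capability
 * (SMP_IO_DISPLAY_ONLY = 0 ... SMP_IO_KEYBOARD_DISPLAY = 4).
 */
static const unsigned char gen_method[5][5] = {
	{ JUST_WORKS,  JUST_CFM,    REQ_PASSKEY, JUST_WORKS, REQ_PASSKEY },
	{ JUST_WORKS,  JUST_CFM,    REQ_PASSKEY, JUST_WORKS, REQ_PASSKEY },
	{ CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, CFM_PASSKEY },
	{ JUST_WORKS,  JUST_CFM,    JUST_WORKS,  JUST_WORKS, JUST_CFM },
	{ CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP },
};

int main(void)
{
	int local_io = 0;	/* DisplayOnly */
	int remote_io = 4;	/* KeyboardDisplay */

	/* gen_method[remote_io][local_io], as in get_auth_method() */
	printf("method 0x%02x\n", gen_method[remote_io][local_io]);	/* 0x03 */

	return 0;
}
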
+
+static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
+ u8 local_io, u8 remote_io)
+{
+ struct hci_conn *hcon = conn->hcon;
+ struct smp_chan *smp = conn->smp_chan;
+ u8 method;
+ u32 passkey = 0;
+ int ret = 0;
+
+ /* Initialize key for JUST WORKS */
+ memset(smp->tk, 0, sizeof(smp->tk));
+ clear_bit(SMP_FLAG_TK_VALID, &smp->flags);
+
+ BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io);
+
+ /* If neither side wants MITM, use JUST WORKS */
+ /* Otherwise, look up method from the table */
+ if (!(auth & SMP_AUTH_MITM))
+ method = JUST_WORKS;
+ else
+ method = get_auth_method(smp, local_io, remote_io);
+
+ /* If not bonding, don't ask user to confirm a Zero TK */
+ if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM)
+ method = JUST_WORKS;
+
+ /* Don't confirm locally initiated pairing attempts */
+ if (method == JUST_CFM && test_bit(SMP_FLAG_INITIATOR, &smp->flags))
+ method = JUST_WORKS;
+
+	/* If Just Works, continue with a zero TK */
+ if (method == JUST_WORKS) {
+ set_bit(SMP_FLAG_TK_VALID, &smp->flags);
+ return 0;
+ }
+
+	/* Anything other than Just Works/Confirm results in MITM authentication */
+ if (method != JUST_CFM)
+ set_bit(SMP_FLAG_MITM_AUTH, &smp->flags);
+
+	/* If both devices have Keyboard-Display I/O, the master
+ * Confirms and the slave Enters the passkey.
+ */
+ if (method == OVERLAP) {
+ if (hcon->link_mode & HCI_LM_MASTER)
+ method = CFM_PASSKEY;
+ else
+ method = REQ_PASSKEY;
+ }
+
+ /* Generate random passkey. */
+ if (method == CFM_PASSKEY) {
+ memset(smp->tk, 0, sizeof(smp->tk));
+ get_random_bytes(&passkey, sizeof(passkey));
+ passkey %= 1000000;
+ put_unaligned_le32(passkey, smp->tk);
+ BT_DBG("PassKey: %d", passkey);
+ set_bit(SMP_FLAG_TK_VALID, &smp->flags);
+ }
+
+ hci_dev_lock(hcon->hdev);
+
+ if (method == REQ_PASSKEY)
+ ret = mgmt_user_passkey_request(hcon->hdev, &hcon->dst,
+ hcon->type, hcon->dst_type);
+ else if (method == JUST_CFM)
+ ret = mgmt_user_confirm_request(hcon->hdev, &hcon->dst,
+ hcon->type, hcon->dst_type,
+ passkey, 1);
+ else
+ ret = mgmt_user_passkey_notify(hcon->hdev, &hcon->dst,
+ hcon->type, hcon->dst_type,
+ passkey, 0);
+
+ hci_dev_unlock(hcon->hdev);
+
+ return ret;
+}
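
[Editor's note: in the CFM_PASSKEY branch above, a 6-digit decimal passkey is drawn from 32 random bits and stored little-endian in the low octets of the 128-bit TK. A standalone sketch of that derivation; rand() stands in for the kernel's get_random_bytes() purely to keep the demo portable:]

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef unsigned char u8;

int main(void)
{
	u8 tk[16];
	unsigned int passkey;

	memset(tk, 0, sizeof(tk));
	passkey = (unsigned int)rand() % 1000000;

	/* equivalent of put_unaligned_le32(passkey, smp->tk) */
	tk[0] = passkey & 0xff;
	tk[1] = (passkey >> 8) & 0xff;
	tk[2] = (passkey >> 16) & 0xff;
	tk[3] = (passkey >> 24) & 0xff;

	printf("PassKey: %06u\n", passkey);
	return 0;
}
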
+
+static u8 smp_confirm(struct smp_chan *smp)
+{
+ struct l2cap_conn *conn = smp->conn;
+ struct hci_dev *hdev = conn->hcon->hdev;
+ struct crypto_blkcipher *tfm = hdev->tfm_aes;
+ struct smp_cmd_pairing_confirm cp;
+ int ret;
+
+ BT_DBG("conn %p", conn);
+
+	/* Prevent concurrent access to hdev->tfm_aes */
+ hci_dev_lock(hdev);
+
+ ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp,
+ conn->hcon->init_addr_type, &conn->hcon->init_addr,
+ conn->hcon->resp_addr_type, &conn->hcon->resp_addr,
+ cp.confirm_val);
+
+ hci_dev_unlock(hdev);
+
+ if (ret)
+ return SMP_UNSPECIFIED;
+
+ clear_bit(SMP_FLAG_CFM_PENDING, &smp->flags);
+
+ smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
+
+ return 0;
+}
+
+static u8 smp_random(struct smp_chan *smp)
+{
+ struct l2cap_conn *conn = smp->conn;
+ struct hci_conn *hcon = conn->hcon;
+ struct hci_dev *hdev = hcon->hdev;
+ struct crypto_blkcipher *tfm = hdev->tfm_aes;
+ u8 confirm[16];
+ int ret;
+
+ if (IS_ERR_OR_NULL(tfm))
+ return SMP_UNSPECIFIED;
+
+ BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
+
+	/* Prevent concurrent access to hdev->tfm_aes */
+ hci_dev_lock(hdev);
+
+ ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp,
+ hcon->init_addr_type, &hcon->init_addr,
+ hcon->resp_addr_type, &hcon->resp_addr, confirm);
+
+ hci_dev_unlock(hdev);
+
+ if (ret)
+ return SMP_UNSPECIFIED;
+
+ if (memcmp(smp->pcnf, confirm, sizeof(smp->pcnf)) != 0) {
+ BT_ERR("Pairing failed (confirmation values mismatch)");
+ return SMP_CONFIRM_FAILED;
+ }
+
+ if (hcon->out) {
+ u8 stk[16];
+ __le64 rand = 0;
+ __le16 ediv = 0;
+
+ smp_s1(tfm, smp->tk, smp->rrnd, smp->prnd, stk);
+
+ memset(stk + smp->enc_key_size, 0,
+ SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
+
+ if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags))
+ return SMP_UNSPECIFIED;
+
+ hci_le_start_enc(hcon, ediv, rand, stk);
+ hcon->enc_key_size = smp->enc_key_size;
+ } else {
+ u8 stk[16], auth;
+ __le64 rand = 0;
+ __le16 ediv = 0;
+
+ smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
+ smp->prnd);
+
+ smp_s1(tfm, smp->tk, smp->prnd, smp->rrnd, stk);
+
+ memset(stk + smp->enc_key_size, 0,
+ SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
+
+ if (hcon->pending_sec_level == BT_SECURITY_HIGH)
+ auth = 1;
+ else
+ auth = 0;
+
+ hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
+ HCI_SMP_STK_SLAVE, auth, stk, smp->enc_key_size,
+ ediv, rand);
+ }
+
+ return 0;
+}
+
+static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
+{
+ struct smp_chan *smp;
+
+ smp = kzalloc(sizeof(*smp), GFP_ATOMIC);
+ if (!smp)
+ return NULL;
+
+ smp->conn = conn;
+ conn->smp_chan = smp;
+ conn->hcon->smp_conn = conn;
+
+ hci_conn_hold(conn->hcon);
+
+ return smp;
+}
+
+void smp_chan_destroy(struct l2cap_conn *conn)
+{
+ struct smp_chan *smp = conn->smp_chan;
+ bool complete;
+
+ BUG_ON(!smp);
+
+ complete = test_bit(SMP_FLAG_COMPLETE, &smp->flags);
+ mgmt_smp_complete(conn->hcon, complete);
+
+ kfree(smp->csrk);
+ kfree(smp->slave_csrk);
+
+ /* If pairing failed clean up any keys we might have */
+ if (!complete) {
+ if (smp->ltk) {
+ list_del(&smp->ltk->list);
+ kfree(smp->ltk);
+ }
+
+ if (smp->slave_ltk) {
+ list_del(&smp->slave_ltk->list);
+ kfree(smp->slave_ltk);
+ }
+
+ if (smp->remote_irk) {
+ list_del(&smp->remote_irk->list);
+ kfree(smp->remote_irk);
+ }
+ }
+
+ kfree(smp);
+ conn->smp_chan = NULL;
+ conn->hcon->smp_conn = NULL;
+ hci_conn_drop(conn->hcon);
+}
+
+int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey)
+{
+ struct l2cap_conn *conn = hcon->smp_conn;
+ struct smp_chan *smp;
+ u32 value;
+
+ BT_DBG("");
+
+ if (!conn)
+ return -ENOTCONN;
+
+ smp = conn->smp_chan;
+
+ switch (mgmt_op) {
+ case MGMT_OP_USER_PASSKEY_REPLY:
+ value = le32_to_cpu(passkey);
+ memset(smp->tk, 0, sizeof(smp->tk));
+ BT_DBG("PassKey: %d", value);
+ put_unaligned_le32(value, smp->tk);
+ /* Fall Through */
+ case MGMT_OP_USER_CONFIRM_REPLY:
+ set_bit(SMP_FLAG_TK_VALID, &smp->flags);
+ break;
+ case MGMT_OP_USER_PASSKEY_NEG_REPLY:
+ case MGMT_OP_USER_CONFIRM_NEG_REPLY:
+ smp_failure(conn, SMP_PASSKEY_ENTRY_FAILED);
+ return 0;
+ default:
+ smp_failure(conn, SMP_PASSKEY_ENTRY_FAILED);
+ return -EOPNOTSUPP;
+ }
+
+ /* If it is our turn to send Pairing Confirm, do so now */
+ if (test_bit(SMP_FLAG_CFM_PENDING, &smp->flags)) {
+ u8 rsp = smp_confirm(smp);
+ if (rsp)
+ smp_failure(conn, rsp);
+ }
+
+ return 0;
+}
+
+static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+ struct smp_cmd_pairing rsp, *req = (void *) skb->data;
+ struct smp_chan *smp;
+ u8 key_size, auth, sec_level;
+ int ret;
+
+ BT_DBG("conn %p", conn);
+
+ if (skb->len < sizeof(*req))
+ return SMP_INVALID_PARAMS;
+
+ if (conn->hcon->link_mode & HCI_LM_MASTER)
+ return SMP_CMD_NOTSUPP;
+
+ if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))
+ smp = smp_chan_create(conn);
+ else
+ smp = conn->smp_chan;
+
+ if (!smp)
+ return SMP_UNSPECIFIED;
+
+ smp->preq[0] = SMP_CMD_PAIRING_REQ;
+ memcpy(&smp->preq[1], req, sizeof(*req));
+ skb_pull(skb, sizeof(*req));
+
+ /* We didn't start the pairing, so match remote */
+ auth = req->auth_req;
+
+ sec_level = authreq_to_seclevel(auth);
+ if (sec_level > conn->hcon->pending_sec_level)
+ conn->hcon->pending_sec_level = sec_level;
+
+	/* If we need MITM, check that it can be achieved */
+ if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) {
+ u8 method;
+
+ method = get_auth_method(smp, conn->hcon->io_capability,
+ req->io_capability);
+ if (method == JUST_WORKS || method == JUST_CFM)
+ return SMP_AUTH_REQUIREMENTS;
+ }
+
+ build_pairing_cmd(conn, req, &rsp, auth);
+
+ key_size = min(req->max_key_size, rsp.max_key_size);
+ if (check_enc_key_size(conn, key_size))
+ return SMP_ENC_KEY_SIZE;
+
+ get_random_bytes(smp->prnd, sizeof(smp->prnd));
+
+ smp->prsp[0] = SMP_CMD_PAIRING_RSP;
+ memcpy(&smp->prsp[1], &rsp, sizeof(rsp));
+
+ smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp);
+
+ /* Request setup of TK */
+ ret = tk_request(conn, 0, auth, rsp.io_capability, req->io_capability);
+ if (ret)
+ return SMP_UNSPECIFIED;
+
+ clear_bit(SMP_FLAG_INITIATOR, &smp->flags);
+
+ return 0;
+}
+
+static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+ struct smp_cmd_pairing *req, *rsp = (void *) skb->data;
+ struct smp_chan *smp = conn->smp_chan;
+ u8 key_size, auth = SMP_AUTH_NONE;
+ int ret;
+
+ BT_DBG("conn %p", conn);
+
+ if (skb->len < sizeof(*rsp))
+ return SMP_INVALID_PARAMS;
+
+ if (!(conn->hcon->link_mode & HCI_LM_MASTER))
+ return SMP_CMD_NOTSUPP;
+
+ skb_pull(skb, sizeof(*rsp));
+
+ req = (void *) &smp->preq[1];
+
+ key_size = min(req->max_key_size, rsp->max_key_size);
+ if (check_enc_key_size(conn, key_size))
+ return SMP_ENC_KEY_SIZE;
+
+	/* If we need MITM, check that it can be achieved */
+ if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) {
+ u8 method;
+
+ method = get_auth_method(smp, req->io_capability,
+ rsp->io_capability);
+ if (method == JUST_WORKS || method == JUST_CFM)
+ return SMP_AUTH_REQUIREMENTS;
+ }
+
+ get_random_bytes(smp->prnd, sizeof(smp->prnd));
+
+ smp->prsp[0] = SMP_CMD_PAIRING_RSP;
+ memcpy(&smp->prsp[1], rsp, sizeof(*rsp));
+
+ /* Update remote key distribution in case the remote cleared
+ * some bits that we had enabled in our request.
+ */
+ smp->remote_key_dist &= rsp->resp_key_dist;
+
+ if ((req->auth_req & SMP_AUTH_BONDING) &&
+ (rsp->auth_req & SMP_AUTH_BONDING))
+ auth = SMP_AUTH_BONDING;
+
+ auth |= (req->auth_req | rsp->auth_req) & SMP_AUTH_MITM;
+
+ ret = tk_request(conn, 0, auth, req->io_capability, rsp->io_capability);
+ if (ret)
+ return SMP_UNSPECIFIED;
+
+ set_bit(SMP_FLAG_CFM_PENDING, &smp->flags);
+
+	/* Can't send our Pairing Confirm until the TK has been confirmed */
+ if (test_bit(SMP_FLAG_TK_VALID, &smp->flags))
+ return smp_confirm(smp);
+
+ return 0;
+}
+
+static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+ struct smp_chan *smp = conn->smp_chan;
+
+ BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
+
+ if (skb->len < sizeof(smp->pcnf))
+ return SMP_INVALID_PARAMS;
+
+ memcpy(smp->pcnf, skb->data, sizeof(smp->pcnf));
+ skb_pull(skb, sizeof(smp->pcnf));
+
+ if (conn->hcon->out)
+ smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
+ smp->prnd);
+ else if (test_bit(SMP_FLAG_TK_VALID, &smp->flags))
+ return smp_confirm(smp);
+ else
+ set_bit(SMP_FLAG_CFM_PENDING, &smp->flags);
+
+ return 0;
+}
+
+static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+ struct smp_chan *smp = conn->smp_chan;
+
+ BT_DBG("conn %p", conn);
+
+ if (skb->len < sizeof(smp->rrnd))
+ return SMP_INVALID_PARAMS;
+
+ memcpy(smp->rrnd, skb->data, sizeof(smp->rrnd));
+ skb_pull(skb, sizeof(smp->rrnd));
+
+ return smp_random(smp);
+}
+
+static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
+{
+ struct smp_ltk *key;
+ struct hci_conn *hcon = conn->hcon;
+
+ key = hci_find_ltk_by_addr(hcon->hdev, &hcon->dst, hcon->dst_type,
+ hcon->out);
+ if (!key)
+ return 0;
+
+ if (sec_level > BT_SECURITY_MEDIUM && !key->authenticated)
+ return 0;
+
+ if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags))
+ return 1;
+
+ hci_le_start_enc(hcon, key->ediv, key->rand, key->val);
+ hcon->enc_key_size = key->enc_size;
+
+ return 1;
+}
+
+static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+ struct smp_cmd_security_req *rp = (void *) skb->data;
+ struct smp_cmd_pairing cp;
+ struct hci_conn *hcon = conn->hcon;
+ struct smp_chan *smp;
+ u8 sec_level;
+
+ BT_DBG("conn %p", conn);
+
+ if (skb->len < sizeof(*rp))
+ return SMP_INVALID_PARAMS;
+
+ if (!(conn->hcon->link_mode & HCI_LM_MASTER))
+ return SMP_CMD_NOTSUPP;
+
+ sec_level = authreq_to_seclevel(rp->auth_req);
+ if (sec_level > hcon->pending_sec_level)
+ hcon->pending_sec_level = sec_level;
+
+ if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
+ return 0;
+
+ if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
+ return 0;
+
+ smp = smp_chan_create(conn);
+
+ skb_pull(skb, sizeof(*rp));
+
+ memset(&cp, 0, sizeof(cp));
+ build_pairing_cmd(conn, &cp, NULL, rp->auth_req);
+
+ smp->preq[0] = SMP_CMD_PAIRING_REQ;
+ memcpy(&smp->preq[1], &cp, sizeof(cp));
+
+ smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
+
+ clear_bit(SMP_FLAG_INITIATOR, &smp->flags);
+
+ return 0;
+}
+
+bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level)
+{
+ if (sec_level == BT_SECURITY_LOW)
+ return true;
+
+ if (hcon->sec_level >= sec_level)
+ return true;
+
+ return false;
+}
+
+int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
+{
+ struct l2cap_conn *conn = hcon->l2cap_data;
+ struct smp_chan *smp;
+ __u8 authreq;
+
+ BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level);
+
+ /* This may be NULL if there's an unexpected disconnection */
+ if (!conn)
+ return 1;
+
+ if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags))
+ return 1;
+
+ if (smp_sufficient_security(hcon, sec_level))
+ return 1;
+
+ if (sec_level > hcon->pending_sec_level)
+ hcon->pending_sec_level = sec_level;
+
+ if (hcon->link_mode & HCI_LM_MASTER)
+ if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
+ return 0;
+
+ if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
+ return 0;
+
+ smp = smp_chan_create(conn);
+ if (!smp)
+ return 1;
+
+ authreq = seclevel_to_authreq(sec_level);
+
+	/* Require MITM if the IO Capability allows it or the security
+	 * level requires it.
+ */
+ if (hcon->io_capability != HCI_IO_NO_INPUT_OUTPUT ||
+ hcon->pending_sec_level > BT_SECURITY_MEDIUM)
+ authreq |= SMP_AUTH_MITM;
+
+ if (hcon->link_mode & HCI_LM_MASTER) {
+ struct smp_cmd_pairing cp;
+
+ build_pairing_cmd(conn, &cp, NULL, authreq);
+ smp->preq[0] = SMP_CMD_PAIRING_REQ;
+ memcpy(&smp->preq[1], &cp, sizeof(cp));
+
+ smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
+ } else {
+ struct smp_cmd_security_req cp;
+ cp.auth_req = authreq;
+ smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp);
+ }
+
+ set_bit(SMP_FLAG_INITIATOR, &smp->flags);
+
+ return 0;
+}
+
+static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+ struct smp_cmd_encrypt_info *rp = (void *) skb->data;
+ struct smp_chan *smp = conn->smp_chan;
+
+ BT_DBG("conn %p", conn);
+
+ if (skb->len < sizeof(*rp))
+ return SMP_INVALID_PARAMS;
+
+ /* Ignore this PDU if it wasn't requested */
+ if (!(smp->remote_key_dist & SMP_DIST_ENC_KEY))
+ return 0;
+
+ skb_pull(skb, sizeof(*rp));
+
+ memcpy(smp->tk, rp->ltk, sizeof(smp->tk));
+
+ return 0;
+}
+
+static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+ struct smp_cmd_master_ident *rp = (void *) skb->data;
+ struct smp_chan *smp = conn->smp_chan;
+ struct hci_dev *hdev = conn->hcon->hdev;
+ struct hci_conn *hcon = conn->hcon;
+ struct smp_ltk *ltk;
+ u8 authenticated;
+
+ BT_DBG("conn %p", conn);
+
+ if (skb->len < sizeof(*rp))
+ return SMP_INVALID_PARAMS;
+
+ /* Ignore this PDU if it wasn't requested */
+ if (!(smp->remote_key_dist & SMP_DIST_ENC_KEY))
+ return 0;
+
+ /* Mark the information as received */
+ smp->remote_key_dist &= ~SMP_DIST_ENC_KEY;
+
+ skb_pull(skb, sizeof(*rp));
+
+ hci_dev_lock(hdev);
+ authenticated = (hcon->sec_level == BT_SECURITY_HIGH);
+ ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type, HCI_SMP_LTK,
+ authenticated, smp->tk, smp->enc_key_size,
+ rp->ediv, rp->rand);
+ smp->ltk = ltk;
+ if (!(smp->remote_key_dist & SMP_DIST_ID_KEY))
+ smp_distribute_keys(conn);
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int smp_cmd_ident_info(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+ struct smp_cmd_ident_info *info = (void *) skb->data;
+ struct smp_chan *smp = conn->smp_chan;
+
+ BT_DBG("");
+
+ if (skb->len < sizeof(*info))
+ return SMP_INVALID_PARAMS;
+
+ /* Ignore this PDU if it wasn't requested */
+ if (!(smp->remote_key_dist & SMP_DIST_ID_KEY))
+ return 0;
+
+ skb_pull(skb, sizeof(*info));
+
+ memcpy(smp->irk, info->irk, 16);
+
+ return 0;
+}
+
+static int smp_cmd_ident_addr_info(struct l2cap_conn *conn,
+ struct sk_buff *skb)
+{
+ struct smp_cmd_ident_addr_info *info = (void *) skb->data;
+ struct smp_chan *smp = conn->smp_chan;
+ struct hci_conn *hcon = conn->hcon;
+ bdaddr_t rpa;
+
+ BT_DBG("");
+
+ if (skb->len < sizeof(*info))
+ return SMP_INVALID_PARAMS;
+
+ /* Ignore this PDU if it wasn't requested */
+ if (!(smp->remote_key_dist & SMP_DIST_ID_KEY))
+ return 0;
+
+ /* Mark the information as received */
+ smp->remote_key_dist &= ~SMP_DIST_ID_KEY;
+
+ skb_pull(skb, sizeof(*info));
+
+ /* Strictly speaking the Core Specification (4.1) allows sending
+ * an empty address which would force us to rely on just the IRK
+	 * as "identity information". However, since such implementations
+	 * are not known to exist, and to avoid over-complicating our own
+	 * implementation, simply pretend that we never received an IRK
+	 * for such a device.
+ */
+ if (!bacmp(&info->bdaddr, BDADDR_ANY)) {
+ BT_ERR("Ignoring IRK with no identity address");
+ smp_distribute_keys(conn);
+ return 0;
+ }
+
+ bacpy(&smp->id_addr, &info->bdaddr);
+ smp->id_addr_type = info->addr_type;
+
+ if (hci_bdaddr_is_rpa(&hcon->dst, hcon->dst_type))
+ bacpy(&rpa, &hcon->dst);
+ else
+ bacpy(&rpa, BDADDR_ANY);
+
+ smp->remote_irk = hci_add_irk(conn->hcon->hdev, &smp->id_addr,
+ smp->id_addr_type, smp->irk, &rpa);
+
+ smp_distribute_keys(conn);
+
+ return 0;
+}
+
+static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+ struct smp_cmd_sign_info *rp = (void *) skb->data;
+ struct smp_chan *smp = conn->smp_chan;
+ struct hci_dev *hdev = conn->hcon->hdev;
+ struct smp_csrk *csrk;
+
+ BT_DBG("conn %p", conn);
+
+ if (skb->len < sizeof(*rp))
+ return SMP_INVALID_PARAMS;
+
+ /* Ignore this PDU if it wasn't requested */
+ if (!(smp->remote_key_dist & SMP_DIST_SIGN))
+ return 0;
+
+ /* Mark the information as received */
+ smp->remote_key_dist &= ~SMP_DIST_SIGN;
+
+ skb_pull(skb, sizeof(*rp));
+
+ hci_dev_lock(hdev);
+ csrk = kzalloc(sizeof(*csrk), GFP_KERNEL);
+ if (csrk) {
+ csrk->master = 0x01;
+ memcpy(csrk->val, rp->csrk, sizeof(csrk->val));
+ }
+ smp->csrk = csrk;
+ if (!(smp->remote_key_dist & SMP_DIST_SIGN))
+ smp_distribute_keys(conn);
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+ struct hci_conn *hcon = conn->hcon;
+ __u8 code, reason;
+ int err = 0;
+
+ if (hcon->type != LE_LINK) {
+ kfree_skb(skb);
+ return 0;
+ }
+
+ if (skb->len < 1) {
+ kfree_skb(skb);
+ return -EILSEQ;
+ }
+
+ if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags)) {
+ err = -ENOTSUPP;
+ reason = SMP_PAIRING_NOTSUPP;
+ goto done;
+ }
+
+ code = skb->data[0];
+ skb_pull(skb, sizeof(code));
+
+ /*
+	 * The SMP context must be initialized for all PDUs other than
+	 * pairing and security requests. If we get any other PDU when
+	 * not initialized, simply disconnect (done if this function
+ * returns an error).
+ */
+ if (code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ &&
+ !conn->smp_chan) {
+ BT_ERR("Unexpected SMP command 0x%02x. Disconnecting.", code);
+ kfree_skb(skb);
+ return -ENOTSUPP;
+ }
+
+ switch (code) {
+ case SMP_CMD_PAIRING_REQ:
+ reason = smp_cmd_pairing_req(conn, skb);
+ break;
+
+ case SMP_CMD_PAIRING_FAIL:
+ smp_failure(conn, 0);
+ reason = 0;
+ err = -EPERM;
+ break;
+
+ case SMP_CMD_PAIRING_RSP:
+ reason = smp_cmd_pairing_rsp(conn, skb);
+ break;
+
+ case SMP_CMD_SECURITY_REQ:
+ reason = smp_cmd_security_req(conn, skb);
+ break;
+
+ case SMP_CMD_PAIRING_CONFIRM:
+ reason = smp_cmd_pairing_confirm(conn, skb);
+ break;
+
+ case SMP_CMD_PAIRING_RANDOM:
+ reason = smp_cmd_pairing_random(conn, skb);
+ break;
+
+ case SMP_CMD_ENCRYPT_INFO:
+ reason = smp_cmd_encrypt_info(conn, skb);
+ break;
+
+ case SMP_CMD_MASTER_IDENT:
+ reason = smp_cmd_master_ident(conn, skb);
+ break;
+
+ case SMP_CMD_IDENT_INFO:
+ reason = smp_cmd_ident_info(conn, skb);
+ break;
+
+ case SMP_CMD_IDENT_ADDR_INFO:
+ reason = smp_cmd_ident_addr_info(conn, skb);
+ break;
+
+ case SMP_CMD_SIGN_INFO:
+ reason = smp_cmd_sign_info(conn, skb);
+ break;
+
+ default:
+ BT_DBG("Unknown command code 0x%2.2x", code);
+
+ reason = SMP_CMD_NOTSUPP;
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+
+done:
+ if (reason)
+ smp_failure(conn, reason);
+
+ kfree_skb(skb);
+ return err;
+}
+
+static void smp_notify_keys(struct l2cap_conn *conn)
+{
+ struct smp_chan *smp = conn->smp_chan;
+ struct hci_conn *hcon = conn->hcon;
+ struct hci_dev *hdev = hcon->hdev;
+ struct smp_cmd_pairing *req = (void *) &smp->preq[1];
+ struct smp_cmd_pairing *rsp = (void *) &smp->prsp[1];
+ bool persistent;
+
+ if (smp->remote_irk) {
+ mgmt_new_irk(hdev, smp->remote_irk);
+ /* Now that user space can be considered to know the
+		 * identity address, track the connection based on it
+ * from now on.
+ */
+ bacpy(&hcon->dst, &smp->remote_irk->bdaddr);
+ hcon->dst_type = smp->remote_irk->addr_type;
+ l2cap_conn_update_id_addr(hcon);
+ }
+
+ /* The LTKs and CSRKs should be persistent only if both sides
+ * had the bonding bit set in their authentication requests.
+ */
+ persistent = !!((req->auth_req & rsp->auth_req) & SMP_AUTH_BONDING);
+
+ if (smp->csrk) {
+ smp->csrk->bdaddr_type = hcon->dst_type;
+ bacpy(&smp->csrk->bdaddr, &hcon->dst);
+ mgmt_new_csrk(hdev, smp->csrk, persistent);
+ }
+
+ if (smp->slave_csrk) {
+ smp->slave_csrk->bdaddr_type = hcon->dst_type;
+ bacpy(&smp->slave_csrk->bdaddr, &hcon->dst);
+ mgmt_new_csrk(hdev, smp->slave_csrk, persistent);
+ }
+
+ if (smp->ltk) {
+ smp->ltk->bdaddr_type = hcon->dst_type;
+ bacpy(&smp->ltk->bdaddr, &hcon->dst);
+ mgmt_new_ltk(hdev, smp->ltk, persistent);
+ }
+
+ if (smp->slave_ltk) {
+ smp->slave_ltk->bdaddr_type = hcon->dst_type;
+ bacpy(&smp->slave_ltk->bdaddr, &hcon->dst);
+ mgmt_new_ltk(hdev, smp->slave_ltk, persistent);
+ }
+}
+
+int smp_distribute_keys(struct l2cap_conn *conn)
+{
+ struct smp_cmd_pairing *req, *rsp;
+ struct smp_chan *smp = conn->smp_chan;
+ struct hci_conn *hcon = conn->hcon;
+ struct hci_dev *hdev = hcon->hdev;
+ __u8 *keydist;
+
+ BT_DBG("conn %p", conn);
+
+ if (!test_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
+ return 0;
+
+ rsp = (void *) &smp->prsp[1];
+
+ /* The responder sends its keys first */
+ if (hcon->out && (smp->remote_key_dist & 0x07))
+ return 0;
+
+ req = (void *) &smp->preq[1];
+
+ if (hcon->out) {
+ keydist = &rsp->init_key_dist;
+ *keydist &= req->init_key_dist;
+ } else {
+ keydist = &rsp->resp_key_dist;
+ *keydist &= req->resp_key_dist;
+ }
+
+ BT_DBG("keydist 0x%x", *keydist);
+
+ if (*keydist & SMP_DIST_ENC_KEY) {
+ struct smp_cmd_encrypt_info enc;
+ struct smp_cmd_master_ident ident;
+ struct smp_ltk *ltk;
+ u8 authenticated;
+ __le16 ediv;
+ __le64 rand;
+
+ get_random_bytes(enc.ltk, sizeof(enc.ltk));
+ get_random_bytes(&ediv, sizeof(ediv));
+ get_random_bytes(&rand, sizeof(rand));
+
+ smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc);
+
+ authenticated = hcon->sec_level == BT_SECURITY_HIGH;
+ ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type,
+ HCI_SMP_LTK_SLAVE, authenticated, enc.ltk,
+ smp->enc_key_size, ediv, rand);
+ smp->slave_ltk = ltk;
+
+ ident.ediv = ediv;
+ ident.rand = rand;
+
+ smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident);
+
+ *keydist &= ~SMP_DIST_ENC_KEY;
+ }
+
+ if (*keydist & SMP_DIST_ID_KEY) {
+ struct smp_cmd_ident_addr_info addrinfo;
+ struct smp_cmd_ident_info idinfo;
+
+ memcpy(idinfo.irk, hdev->irk, sizeof(idinfo.irk));
+
+ smp_send_cmd(conn, SMP_CMD_IDENT_INFO, sizeof(idinfo), &idinfo);
+
+ /* The hci_conn contains the local identity address
+ * after the connection has been established.
+ *
+ * This is true even when the connection has been
+ * established using a resolvable random address.
+ */
+ bacpy(&addrinfo.bdaddr, &hcon->src);
+ addrinfo.addr_type = hcon->src_type;
+
+ smp_send_cmd(conn, SMP_CMD_IDENT_ADDR_INFO, sizeof(addrinfo),
+ &addrinfo);
+
+ *keydist &= ~SMP_DIST_ID_KEY;
+ }
+
+ if (*keydist & SMP_DIST_SIGN) {
+ struct smp_cmd_sign_info sign;
+ struct smp_csrk *csrk;
+
+ /* Generate a new random key */
+ get_random_bytes(sign.csrk, sizeof(sign.csrk));
+
+ csrk = kzalloc(sizeof(*csrk), GFP_KERNEL);
+ if (csrk) {
+ csrk->master = 0x00;
+ memcpy(csrk->val, sign.csrk, sizeof(csrk->val));
+ }
+ smp->slave_csrk = csrk;
+
+ smp_send_cmd(conn, SMP_CMD_SIGN_INFO, sizeof(sign), &sign);
+
+ *keydist &= ~SMP_DIST_SIGN;
+ }
+
+ /* If there are still keys to be received wait for them */
+ if ((smp->remote_key_dist & 0x07))
+ return 0;
+
+ clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags);
+ cancel_delayed_work_sync(&conn->security_timer);
+ set_bit(SMP_FLAG_COMPLETE, &smp->flags);
+ smp_notify_keys(conn);
+
+ smp_chan_destroy(conn);
+
+ return 0;
+}
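
[Editor's note: smp_distribute_keys() walks the negotiated distribution mask in a fixed order (LTK with EDIV/Rand, then IRK plus identity address, then CSRK), clearing each bit as the corresponding PDUs go out; pairing only completes once both this mask and remote_key_dist reach zero. The bookkeeping in miniature:]

#include <stdio.h>

#define SMP_DIST_ENC_KEY	0x01
#define SMP_DIST_ID_KEY		0x02
#define SMP_DIST_SIGN		0x04

int main(void)
{
	unsigned char keydist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;

	if (keydist & SMP_DIST_ENC_KEY) {
		printf("send Encryption Information + Master Identification\n");
		keydist &= ~SMP_DIST_ENC_KEY;
	}
	if (keydist & SMP_DIST_ID_KEY) {
		printf("send Identity Information + Identity Address Information\n");
		keydist &= ~SMP_DIST_ID_KEY;
	}
	if (keydist & SMP_DIST_SIGN) {
		printf("send Signing Information\n");
		keydist &= ~SMP_DIST_SIGN;
	}

	printf("remaining 0x%02x\n", keydist);	/* prints 0x00 */
	return 0;
}
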
diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h
new file mode 100644
index 00000000000..5a8dc36460a
--- /dev/null
+++ b/net/bluetooth/smp.h
@@ -0,0 +1,132 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#ifndef __SMP_H
+#define __SMP_H
+
+struct smp_command_hdr {
+ __u8 code;
+} __packed;
+
+#define SMP_CMD_PAIRING_REQ 0x01
+#define SMP_CMD_PAIRING_RSP 0x02
+struct smp_cmd_pairing {
+ __u8 io_capability;
+ __u8 oob_flag;
+ __u8 auth_req;
+ __u8 max_key_size;
+ __u8 init_key_dist;
+ __u8 resp_key_dist;
+} __packed;
+
+#define SMP_IO_DISPLAY_ONLY 0x00
+#define SMP_IO_DISPLAY_YESNO 0x01
+#define SMP_IO_KEYBOARD_ONLY 0x02
+#define SMP_IO_NO_INPUT_OUTPUT 0x03
+#define SMP_IO_KEYBOARD_DISPLAY 0x04
+
+#define SMP_OOB_NOT_PRESENT 0x00
+#define SMP_OOB_PRESENT 0x01
+
+#define SMP_DIST_ENC_KEY 0x01
+#define SMP_DIST_ID_KEY 0x02
+#define SMP_DIST_SIGN 0x04
+
+#define SMP_AUTH_NONE 0x00
+#define SMP_AUTH_BONDING 0x01
+#define SMP_AUTH_MITM 0x04
+
+#define SMP_CMD_PAIRING_CONFIRM 0x03
+struct smp_cmd_pairing_confirm {
+ __u8 confirm_val[16];
+} __packed;
+
+#define SMP_CMD_PAIRING_RANDOM 0x04
+struct smp_cmd_pairing_random {
+ __u8 rand_val[16];
+} __packed;
+
+#define SMP_CMD_PAIRING_FAIL 0x05
+struct smp_cmd_pairing_fail {
+ __u8 reason;
+} __packed;
+
+#define SMP_CMD_ENCRYPT_INFO 0x06
+struct smp_cmd_encrypt_info {
+ __u8 ltk[16];
+} __packed;
+
+#define SMP_CMD_MASTER_IDENT 0x07
+struct smp_cmd_master_ident {
+ __le16 ediv;
+ __le64 rand;
+} __packed;
+
+#define SMP_CMD_IDENT_INFO 0x08
+struct smp_cmd_ident_info {
+ __u8 irk[16];
+} __packed;
+
+#define SMP_CMD_IDENT_ADDR_INFO 0x09
+struct smp_cmd_ident_addr_info {
+ __u8 addr_type;
+ bdaddr_t bdaddr;
+} __packed;
+
+#define SMP_CMD_SIGN_INFO 0x0a
+struct smp_cmd_sign_info {
+ __u8 csrk[16];
+} __packed;
+
+#define SMP_CMD_SECURITY_REQ 0x0b
+struct smp_cmd_security_req {
+ __u8 auth_req;
+} __packed;
+
+#define SMP_PASSKEY_ENTRY_FAILED 0x01
+#define SMP_OOB_NOT_AVAIL 0x02
+#define SMP_AUTH_REQUIREMENTS 0x03
+#define SMP_CONFIRM_FAILED 0x04
+#define SMP_PAIRING_NOTSUPP 0x05
+#define SMP_ENC_KEY_SIZE 0x06
+#define SMP_CMD_NOTSUPP 0x07
+#define SMP_UNSPECIFIED 0x08
+#define SMP_REPEATED_ATTEMPTS 0x09
+#define SMP_INVALID_PARAMS 0x0a
+
+#define SMP_MIN_ENC_KEY_SIZE 7
+#define SMP_MAX_ENC_KEY_SIZE 16
+
+/* SMP Commands */
+bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level);
+int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
+int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb);
+int smp_distribute_keys(struct l2cap_conn *conn);
+int smp_user_confirm_reply(struct hci_conn *conn, u16 mgmt_op, __le32 passkey);
+
+void smp_chan_destroy(struct l2cap_conn *conn);
+
+bool smp_irk_matches(struct crypto_blkcipher *tfm, u8 irk[16],
+ bdaddr_t *bdaddr);
+int smp_generate_rpa(struct crypto_blkcipher *tfm, u8 irk[16], bdaddr_t *rpa);
+
+#endif /* __SMP_H */