Diffstat (limited to 'net/bluetooth')
-rw-r--r--  net/bluetooth/af_bluetooth.c |   8
-rw-r--r--  net/bluetooth/bnep/core.c    |   2
-rw-r--r--  net/bluetooth/hci_conn.c     |  56
-rw-r--r--  net/bluetooth/hci_core.c     | 267
-rw-r--r--  net/bluetooth/hci_event.c    |  75
-rw-r--r--  net/bluetooth/hci_sysfs.c    |   5
-rw-r--r--  net/bluetooth/l2cap_core.c   | 762
-rw-r--r--  net/bluetooth/l2cap_sock.c   |  76
-rw-r--r--  net/bluetooth/mgmt.c         | 286
-rw-r--r--  net/bluetooth/rfcomm/sock.c  |  14
-rw-r--r--  net/bluetooth/sco.c          |  75
-rw-r--r--  net/bluetooth/smp.c          |   2
12 files changed, 990 insertions, 638 deletions
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 6fb68a9743a..46e7f86acfc 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -210,7 +210,7 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
}
if (sk->sk_state == BT_CONNECTED || !newsock ||
- bt_sk(parent)->defer_setup) {
+ test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) {
bt_accept_unlink(sk);
if (newsock)
sock_graft(sk, newsock);
@@ -410,8 +410,8 @@ static inline unsigned int bt_accept_poll(struct sock *parent)
list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
if (sk->sk_state == BT_CONNECTED ||
- (bt_sk(parent)->defer_setup &&
- sk->sk_state == BT_CONNECT2))
+ (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) &&
+ sk->sk_state == BT_CONNECT2))
return POLLIN | POLLRDNORM;
}
@@ -450,7 +450,7 @@ unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wa
sk->sk_state == BT_CONFIG)
return mask;
- if (!bt_sk(sk)->suspended && sock_writeable(sk))
+ if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
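
The af_bluetooth.c hunks above convert the old defer_setup and suspended booleans in bt_sock into bits in a flags word tested with test_bit(). Not part of the patch, but a minimal userspace sketch of the same pattern follows; the helpers are simplified, non-atomic stand-ins for the kernel's set_bit()/test_bit(), and the enum values are assumed:

#include <stdio.h>

/* Illustrative flag bits, mirroring the enum assumed to exist in bt_sock. */
enum {
	BT_SK_DEFER_SETUP,
	BT_SK_SUSPEND,
};

struct bt_sock_demo {
	unsigned long flags;	/* replaces the old defer_setup/suspended booleans */
};

/* Simplified, non-atomic stand-ins for the kernel's set_bit()/test_bit(). */
static void demo_set_bit(int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}

static int demo_test_bit(int nr, const unsigned long *addr)
{
	return (*addr >> nr) & 1UL;
}

int main(void)
{
	struct bt_sock_demo sk = { .flags = 0 };

	demo_set_bit(BT_SK_DEFER_SETUP, &sk.flags);

	/* Equivalent of the new tests in bt_accept_dequeue()/bt_accept_poll(). */
	if (demo_test_bit(BT_SK_DEFER_SETUP, &sk.flags))
		printf("deferred setup enabled\n");
	if (!demo_test_bit(BT_SK_SUSPEND, &sk.flags))
		printf("socket is writable\n");

	return 0;
}
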
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 88884d1d95f..031d7d65675 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -340,7 +340,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
}
/* Strip 802.1p header */
- if (ntohs(s->eh.h_proto) == 0x8100) {
+ if (ntohs(s->eh.h_proto) == ETH_P_8021Q) {
if (!skb_pull(skb, 4))
goto badframe;
s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2));
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 5238b6b3ea6..3f18a6ed973 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -223,36 +223,6 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
}
EXPORT_SYMBOL(hci_le_start_enc);
-void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
-{
- struct hci_dev *hdev = conn->hdev;
- struct hci_cp_le_ltk_reply cp;
-
- BT_DBG("%p", conn);
-
- memset(&cp, 0, sizeof(cp));
-
- cp.handle = cpu_to_le16(conn->handle);
- memcpy(cp.ltk, ltk, sizeof(ltk));
-
- hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
-}
-EXPORT_SYMBOL(hci_le_ltk_reply);
-
-void hci_le_ltk_neg_reply(struct hci_conn *conn)
-{
- struct hci_dev *hdev = conn->hdev;
- struct hci_cp_le_ltk_neg_reply cp;
-
- BT_DBG("%p", conn);
-
- memset(&cp, 0, sizeof(cp));
-
- cp.handle = cpu_to_le16(conn->handle);
-
- hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(cp), &cp);
-}
-
/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
@@ -513,7 +483,8 @@ EXPORT_SYMBOL(hci_get_route);
/* Create SCO, ACL or LE connection.
* Device _must_ be locked */
-struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
+struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ __u8 dst_type, __u8 sec_level, __u8 auth_type)
{
struct hci_conn *acl;
struct hci_conn *sco;
@@ -522,23 +493,18 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
BT_DBG("%s dst %s", hdev->name, batostr(dst));
if (type == LE_LINK) {
- struct adv_entry *entry;
-
le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
- if (le)
- return ERR_PTR(-EBUSY);
-
- entry = hci_find_adv_entry(hdev, dst);
- if (!entry)
- return ERR_PTR(-EHOSTUNREACH);
+ if (!le) {
+ le = hci_conn_add(hdev, LE_LINK, dst);
+ if (!le)
+ return ERR_PTR(-ENOMEM);
- le = hci_conn_add(hdev, LE_LINK, dst);
- if (!le)
- return ERR_PTR(-ENOMEM);
-
- le->dst_type = entry->bdaddr_type;
+ le->dst_type = bdaddr_to_le(dst_type);
+ hci_le_connect(le);
+ }
- hci_le_connect(le);
+ le->pending_sec_level = sec_level;
+ le->auth_type = auth_type;
hci_conn_hold(le);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index d6dc44cd15b..411ace8e647 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -83,6 +83,7 @@ void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
*/
if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
+ u16 opcode = __le16_to_cpu(sent->opcode);
struct sk_buff *skb;
/* Some CSR based controllers generate a spontaneous
@@ -92,7 +93,7 @@ void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
* command.
*/
- if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET)
+ if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
return;
skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
@@ -251,6 +252,9 @@ static void amp_init(struct hci_dev *hdev)
/* Read Local Version */
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
+
+ /* Read Local AMP Info */
+ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
}
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
@@ -384,7 +388,6 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state)
case DISCOVERY_STOPPED:
if (hdev->discovery.state != DISCOVERY_STARTING)
mgmt_discovering(hdev, 0);
- hdev->discovery.type = 0;
break;
case DISCOVERY_STARTING:
break;
@@ -1089,32 +1092,6 @@ static const struct rfkill_ops hci_rfkill_ops = {
.set_block = hci_rfkill_set_block,
};
-/* Alloc HCI device */
-struct hci_dev *hci_alloc_dev(void)
-{
- struct hci_dev *hdev;
-
- hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
- if (!hdev)
- return NULL;
-
- hci_init_sysfs(hdev);
- skb_queue_head_init(&hdev->driver_init);
-
- return hdev;
-}
-EXPORT_SYMBOL(hci_alloc_dev);
-
-/* Free HCI device */
-void hci_free_dev(struct hci_dev *hdev)
-{
- skb_queue_purge(&hdev->driver_init);
-
- /* will free via device release */
- put_device(&hdev->dev);
-}
-EXPORT_SYMBOL(hci_free_dev);
-
static void hci_power_on(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
@@ -1336,7 +1313,7 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
}
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
- int new_key, u8 authenticated, u8 tk[16], u8 enc_size, u16
+ int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
ediv, u8 rand[8])
{
struct smp_ltk *key, *old_key;
@@ -1544,75 +1521,6 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
return mgmt_device_unblocked(hdev, bdaddr, type);
}
-static void hci_clear_adv_cache(struct work_struct *work)
-{
- struct hci_dev *hdev = container_of(work, struct hci_dev,
- adv_work.work);
-
- hci_dev_lock(hdev);
-
- hci_adv_entries_clear(hdev);
-
- hci_dev_unlock(hdev);
-}
-
-int hci_adv_entries_clear(struct hci_dev *hdev)
-{
- struct adv_entry *entry, *tmp;
-
- list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
- list_del(&entry->list);
- kfree(entry);
- }
-
- BT_DBG("%s adv cache cleared", hdev->name);
-
- return 0;
-}
-
-struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
-{
- struct adv_entry *entry;
-
- list_for_each_entry(entry, &hdev->adv_entries, list)
- if (bacmp(bdaddr, &entry->bdaddr) == 0)
- return entry;
-
- return NULL;
-}
-
-static inline int is_connectable_adv(u8 evt_type)
-{
- if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
- return 1;
-
- return 0;
-}
-
-int hci_add_adv_entry(struct hci_dev *hdev,
- struct hci_ev_le_advertising_info *ev)
-{
- struct adv_entry *entry;
-
- if (!is_connectable_adv(ev->evt_type))
- return -EINVAL;
-
- /* Only new entries should be added to adv_entries. So, if
- * bdaddr was found, don't add it. */
- if (hci_find_adv_entry(hdev, &ev->bdaddr))
- return 0;
-
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- bacpy(&entry->bdaddr, &ev->bdaddr);
- entry->bdaddr_type = ev->bdaddr_type;
-
- list_add(&entry->list, &hdev->adv_entries);
-
- BT_DBG("%s adv entry added: address %s type %u", hdev->name,
- batostr(&entry->bdaddr), entry->bdaddr_type);
-
- return 0;
-}
-
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
struct le_scan_params *param = (struct le_scan_params *) opt;
@@ -1670,6 +1578,24 @@ static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
return 0;
}
+int hci_cancel_le_scan(struct hci_dev *hdev)
+{
+ BT_DBG("%s", hdev->name);
+
+ if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+ return -EALREADY;
+
+ if (cancel_delayed_work(&hdev->le_scan_disable)) {
+ struct hci_cp_le_set_scan_enable cp;
+
+ /* Send HCI command to disable LE Scan */
+ memset(&cp, 0, sizeof(cp));
+ hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
+ }
+
+ return 0;
+}
+
static void le_scan_disable_work(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev,
@@ -1714,95 +1640,103 @@ int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
return 0;
}
-/* Register HCI device */
-int hci_register_dev(struct hci_dev *hdev)
+/* Alloc HCI device */
+struct hci_dev *hci_alloc_dev(void)
{
- struct list_head *head = &hci_dev_list, *p;
- int i, id, error;
-
- BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
-
- if (!hdev->open || !hdev->close)
- return -EINVAL;
-
- /* Do not allow HCI_AMP devices to register at index 0,
- * so the index can be used as the AMP controller ID.
- */
- id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
-
- write_lock(&hci_dev_list_lock);
-
- /* Find first available device id */
- list_for_each(p, &hci_dev_list) {
- if (list_entry(p, struct hci_dev, list)->id != id)
- break;
- head = p; id++;
- }
-
- sprintf(hdev->name, "hci%d", id);
- hdev->id = id;
- list_add_tail(&hdev->list, head);
+ struct hci_dev *hdev;
- mutex_init(&hdev->lock);
+ hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
+ if (!hdev)
+ return NULL;
- hdev->flags = 0;
- hdev->dev_flags = 0;
hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
hdev->esco_type = (ESCO_HV1);
hdev->link_mode = (HCI_LM_ACCEPT);
hdev->io_capability = 0x03; /* No Input No Output */
- hdev->idle_timeout = 0;
hdev->sniff_max_interval = 800;
hdev->sniff_min_interval = 80;
+ mutex_init(&hdev->lock);
+ mutex_init(&hdev->req_lock);
+
+ INIT_LIST_HEAD(&hdev->mgmt_pending);
+ INIT_LIST_HEAD(&hdev->blacklist);
+ INIT_LIST_HEAD(&hdev->uuids);
+ INIT_LIST_HEAD(&hdev->link_keys);
+ INIT_LIST_HEAD(&hdev->long_term_keys);
+ INIT_LIST_HEAD(&hdev->remote_oob_data);
+
INIT_WORK(&hdev->rx_work, hci_rx_work);
INIT_WORK(&hdev->cmd_work, hci_cmd_work);
INIT_WORK(&hdev->tx_work, hci_tx_work);
+ INIT_WORK(&hdev->power_on, hci_power_on);
+ INIT_WORK(&hdev->le_scan, le_scan_work);
+ INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
+ INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
+ INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
+ skb_queue_head_init(&hdev->driver_init);
skb_queue_head_init(&hdev->rx_q);
skb_queue_head_init(&hdev->cmd_q);
skb_queue_head_init(&hdev->raw_q);
- setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
-
- for (i = 0; i < NUM_REASSEMBLY; i++)
- hdev->reassembly[i] = NULL;
-
init_waitqueue_head(&hdev->req_wait_q);
- mutex_init(&hdev->req_lock);
- discovery_init(hdev);
+ setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
+ hci_init_sysfs(hdev);
+ discovery_init(hdev);
hci_conn_hash_init(hdev);
- INIT_LIST_HEAD(&hdev->mgmt_pending);
-
- INIT_LIST_HEAD(&hdev->blacklist);
+ return hdev;
+}
+EXPORT_SYMBOL(hci_alloc_dev);
- INIT_LIST_HEAD(&hdev->uuids);
+/* Free HCI device */
+void hci_free_dev(struct hci_dev *hdev)
+{
+ skb_queue_purge(&hdev->driver_init);
- INIT_LIST_HEAD(&hdev->link_keys);
- INIT_LIST_HEAD(&hdev->long_term_keys);
+ /* will free via device release */
+ put_device(&hdev->dev);
+}
+EXPORT_SYMBOL(hci_free_dev);
- INIT_LIST_HEAD(&hdev->remote_oob_data);
+/* Register HCI device */
+int hci_register_dev(struct hci_dev *hdev)
+{
+ struct list_head *head, *p;
+ int id, error;
- INIT_LIST_HEAD(&hdev->adv_entries);
+ if (!hdev->open || !hdev->close)
+ return -EINVAL;
- INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
- INIT_WORK(&hdev->power_on, hci_power_on);
- INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
+ write_lock(&hci_dev_list_lock);
- INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
+ /* Do not allow HCI_AMP devices to register at index 0,
+ * so the index can be used as the AMP controller ID.
+ */
+ id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
+ head = &hci_dev_list;
- memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
+ /* Find first available device id */
+ list_for_each(p, &hci_dev_list) {
+ int nid = list_entry(p, struct hci_dev, list)->id;
+ if (nid > id)
+ break;
+ if (nid == id)
+ id++;
+ head = p;
+ }
- atomic_set(&hdev->promisc, 0);
+ sprintf(hdev->name, "hci%d", id);
+ hdev->id = id;
- INIT_WORK(&hdev->le_scan, le_scan_work);
+ BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
- INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
+ list_add(&hdev->list, head);
write_unlock(&hci_dev_list_lock);
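
The rewritten registration loop above picks the first unused controller index at or above the minimum (0 for BR/EDR, 1 for AMP), remembering the list node to insert behind, rather than stopping at the first index mismatch. A standalone sketch of that scan over a sorted array of used ids (array and names are illustrative, not the kernel API):

#include <stdio.h>

/* Return the first id >= min_id that is not present in the sorted array
 * used[] of length n -- the same scan hci_register_dev() now performs
 * over the sorted hci_dev_list.
 */
static int first_free_id(const int *used, int n, int min_id)
{
	int id = min_id;
	int i;

	for (i = 0; i < n; i++) {
		if (used[i] > id)
			break;		/* gap found before this entry */
		if (used[i] == id)
			id++;		/* id taken, try the next one */
	}

	return id;
}

int main(void)
{
	int used[] = { 0, 1, 2, 4 };

	/* BR/EDR controllers may start at index 0, AMP controllers at 1. */
	printf("next BR/EDR id: %d\n", first_free_id(used, 4, 0));	/* 3 */
	printf("next AMP id:    %d\n", first_free_id(used, 4, 1));	/* 3 */
	return 0;
}
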
@@ -1884,8 +1818,6 @@ void hci_unregister_dev(struct hci_dev *hdev)
hci_del_sysfs(hdev);
- cancel_delayed_work_sync(&hdev->adv_work);
-
destroy_workqueue(hdev->workqueue);
hci_dev_lock(hdev);
@@ -1894,7 +1826,6 @@ void hci_unregister_dev(struct hci_dev *hdev)
hci_link_keys_clear(hdev);
hci_smp_ltks_clear(hdev);
hci_remote_oob_data_clear(hdev);
- hci_adv_entries_clear(hdev);
hci_dev_unlock(hdev);
hci_dev_put(hdev);
@@ -2231,6 +2162,12 @@ static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
struct hci_dev *hdev = conn->hdev;
struct sk_buff *list;
+ skb->len = skb_headlen(skb);
+ skb->data_len = 0;
+
+ bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
+ hci_add_acl_hdr(skb, conn->handle, flags);
+
list = skb_shinfo(skb)->frag_list;
if (!list) {
/* Non fragmented */
@@ -2274,8 +2211,6 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
skb->dev = (void *) hdev;
- bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
- hci_add_acl_hdr(skb, conn->handle, flags);
hci_queue_acl(conn, &chan->data_q, skb, flags);
@@ -2313,7 +2248,7 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *conn = NULL, *c;
- int num = 0, min = ~0;
+ unsigned int num = 0, min = ~0;
/* We don't have to lock device here. Connections are always
* added and removed with TX task disabled. */
@@ -2394,7 +2329,7 @@ static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_chan *chan = NULL;
- int num = 0, min = ~0, cur_prio = 0;
+ unsigned int num = 0, min = ~0, cur_prio = 0;
struct hci_conn *conn;
int cnt, q, conn_num = 0;
@@ -2945,7 +2880,19 @@ int hci_cancel_inquiry(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
if (!test_bit(HCI_INQUIRY, &hdev->flags))
- return -EPERM;
+ return -EALREADY;
return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
+
+u8 bdaddr_to_le(u8 bdaddr_type)
+{
+ switch (bdaddr_type) {
+ case BDADDR_LE_PUBLIC:
+ return ADDR_LE_DEV_PUBLIC;
+
+ default:
+ /* Fallback to LE Random address type */
+ return ADDR_LE_DEV_RANDOM;
+ }
+}
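
The new bdaddr_to_le() maps the exported BDADDR_LE_* address types onto the HCI-level ADDR_LE_DEV_* types, falling back to the random type. A tiny sketch of the mapping with placeholder constant values (only the identifier names come from the patch; the numeric values are assumptions):

#include <stdio.h>

/* Placeholder values -- the real constants live in the Bluetooth headers. */
enum { BDADDR_BREDR, BDADDR_LE_PUBLIC, BDADDR_LE_RANDOM };
enum { ADDR_LE_DEV_PUBLIC, ADDR_LE_DEV_RANDOM };

static unsigned char demo_bdaddr_to_le(unsigned char bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;
	default:
		/* Fall back to the LE Random address type. */
		return ADDR_LE_DEV_RANDOM;
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       demo_bdaddr_to_le(BDADDR_LE_PUBLIC),	/* -> ADDR_LE_DEV_PUBLIC */
	       demo_bdaddr_to_le(BDADDR_LE_RANDOM),	/* -> ADDR_LE_DEV_RANDOM */
	       demo_bdaddr_to_le(BDADDR_BREDR));	/* -> ADDR_LE_DEV_RANDOM */
	return 0;
}
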
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 1266f78fa8e..4eefb7f65cf 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -69,6 +69,18 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
hci_conn_check_pending(hdev);
}
+static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%x", hdev->name, status);
+
+ if (status)
+ return;
+
+ set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
+}
+
static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
@@ -78,6 +90,8 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
if (status)
return;
+ clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
+
hci_conn_check_pending(hdev);
}
@@ -192,7 +206,8 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
hci_req_complete(hdev, HCI_OP_RESET, status);
/* Reset all non-persistent flags */
- hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS));
+ hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS) |
+ BIT(HCI_PERIODIC_INQ));
hdev->discovery.state = DISCOVERY_STOPPED;
}
@@ -505,7 +520,7 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
events[5] |= 0x10; /* Synchronous Connection Changed */
if (hdev->features[3] & LMP_RSSI_INQ)
- events[4] |= 0x04; /* Inquiry Result with RSSI */
+ events[4] |= 0x02; /* Inquiry Result with RSSI */
if (hdev->features[5] & LMP_SNIFF_SUBR)
events[5] |= 0x20; /* Sniff Subrating */
@@ -615,6 +630,7 @@ done:
static void hci_setup_link_policy(struct hci_dev *hdev)
{
+ struct hci_cp_write_def_link_policy cp;
u16 link_policy = 0;
if (hdev->features[0] & LMP_RSWITCH)
@@ -626,9 +642,8 @@ static void hci_setup_link_policy(struct hci_dev *hdev)
if (hdev->features[1] & LMP_PARK)
link_policy |= HCI_LP_PARK;
- link_policy = cpu_to_le16(link_policy);
- hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(link_policy),
- &link_policy);
+ cp.policy = cpu_to_le16(link_policy);
+ hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
@@ -710,7 +725,7 @@ static void hci_set_le_support(struct hci_dev *hdev)
memset(&cp, 0, sizeof(cp));
- if (enable_le && test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+ if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
cp.le = 1;
cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
}
@@ -887,11 +902,14 @@ static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
+ struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+ if (!rp->status)
+ hdev->inq_tx_power = rp->tx_power;
- hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
+ hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, rp->status);
}
static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1082,23 +1100,23 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
set_bit(HCI_LE_SCAN, &hdev->dev_flags);
- cancel_delayed_work_sync(&hdev->adv_work);
-
hci_dev_lock(hdev);
- hci_adv_entries_clear(hdev);
hci_discovery_set_state(hdev, DISCOVERY_FINDING);
hci_dev_unlock(hdev);
break;
case LE_SCANNING_DISABLED:
- if (status)
+ if (status) {
+ hci_dev_lock(hdev);
+ mgmt_stop_discovery_failed(hdev, status);
+ hci_dev_unlock(hdev);
return;
+ }
clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
- schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);
-
- if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED) {
+ if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
+ hdev->discovery.state == DISCOVERY_FINDING) {
mgmt_interleaved_discovery(hdev);
} else {
hci_dev_lock(hdev);
@@ -1625,6 +1643,8 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
if (status) {
if (conn && conn->state == BT_CONNECT) {
conn->state = BT_CLOSED;
+ mgmt_connect_failed(hdev, &cp->peer_addr, conn->type,
+ conn->dst_type, status);
hci_proto_connect_cfm(conn, status);
hci_conn_del(conn);
}
@@ -1699,6 +1719,9 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
if (!num_rsp)
return;
+ if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
+ return;
+
hci_dev_lock(hdev);
for (; num_rsp; num_rsp--, info++) {
@@ -2040,7 +2063,7 @@ static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *
clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
if (ev->status && conn->state == BT_CONNECTED) {
- hci_acl_disconn(conn, 0x13);
+ hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
hci_conn_put(conn);
goto unlock;
}
@@ -2154,6 +2177,10 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
hci_cc_inquiry_cancel(hdev, skb);
break;
+ case HCI_OP_PERIODIC_INQ:
+ hci_cc_periodic_inq(hdev, skb);
+ break;
+
case HCI_OP_EXIT_PERIODIC_INQ:
hci_cc_exit_periodic_inq(hdev, skb);
break;
@@ -2806,6 +2833,9 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
if (!num_rsp)
return;
+ if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
+ return;
+
hci_dev_lock(hdev);
if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
@@ -2971,12 +3001,16 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
struct inquiry_data data;
struct extended_inquiry_info *info = (void *) (skb->data + 1);
int num_rsp = *((__u8 *) skb->data);
+ size_t eir_len;
BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
if (!num_rsp)
return;
+ if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
+ return;
+
hci_dev_lock(hdev);
for (; num_rsp; num_rsp--, info++) {
@@ -3000,9 +3034,10 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
name_known = hci_inquiry_cache_update(hdev, &data, name_known,
&ssp);
+ eir_len = eir_get_length(info->data, sizeof(info->data));
mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
info->dev_class, info->rssi, !name_known,
- ssp, info->data, sizeof(info->data));
+ ssp, info->data, eir_len);
}
hci_dev_unlock(hdev);
@@ -3322,8 +3357,6 @@ static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
while (num_reports--) {
struct hci_ev_le_advertising_info *ev = ptr;
- hci_add_adv_entry(hdev, ev);
-
rssi = ev->data[ev->length];
mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
NULL, rssi, 0, 1, ev->data, ev->length);
@@ -3343,7 +3376,7 @@ static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
struct hci_conn *conn;
struct smp_ltk *ltk;
- BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle));
+ BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));
hci_dev_lock(hdev);
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index bc154298979..937f3187eaf 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -444,8 +444,8 @@ static const struct file_operations blacklist_fops = {
static void print_bt_uuid(struct seq_file *f, u8 *uuid)
{
- u32 data0, data4;
- u16 data1, data2, data3, data5;
+ __be32 data0, data4;
+ __be16 data1, data2, data3, data5;
memcpy(&data0, &uuid[0], 4);
memcpy(&data1, &uuid[4], 2);
@@ -533,7 +533,6 @@ int hci_add_sysfs(struct hci_dev *hdev)
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
- dev->parent = hdev->parent;
dev_set_name(dev, "%s", hdev->name);
err = device_add(dev);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 6f9c25b633a..24f144b72a9 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -4,6 +4,7 @@
Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
Copyright (C) 2010 Google Inc.
Copyright (C) 2011 ProFUSION Embedded Systems
+ Copyright (c) 2012 Code Aurora Forum. All rights reserved.
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -70,7 +71,7 @@ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
- struct l2cap_chan *chan, int err);
+ struct l2cap_chan *chan, int err);
/* ---- L2CAP channels ---- */
@@ -97,13 +98,15 @@ static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16
}
/* Find channel with given SCID.
- * Returns locked socket */
+ * Returns locked channel. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
struct l2cap_chan *c;
mutex_lock(&conn->chan_lock);
c = __l2cap_get_chan_by_scid(conn, cid);
+ if (c)
+ l2cap_chan_lock(c);
mutex_unlock(&conn->chan_lock);
return c;
@@ -120,17 +123,6 @@ static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8
return NULL;
}
-static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
-{
- struct l2cap_chan *c;
-
- mutex_lock(&conn->chan_lock);
- c = __l2cap_get_chan_by_ident(conn, ident);
- mutex_unlock(&conn->chan_lock);
-
- return c;
-}
-
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
{
struct l2cap_chan *c;
@@ -232,6 +224,124 @@ static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
release_sock(sk);
}
+/* ---- L2CAP sequence number lists ---- */
+
+/* For ERTM, ordered lists of sequence numbers must be tracked for
+ * SREJ requests that are received and for frames that are to be
+ * retransmitted. These seq_list functions implement a singly-linked
+ * list in an array, where membership in the list can also be checked
+ * in constant time. Items can also be added to the tail of the list
+ * and removed from the head in constant time, without further memory
+ * allocs or frees.
+ */
+
+static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
+{
+ size_t alloc_size, i;
+
+ /* Allocated size is a power of 2 to map sequence numbers
+ * (which may be up to 14 bits) in to a smaller array that is
+ * sized for the negotiated ERTM transmit windows.
+ */
+ alloc_size = roundup_pow_of_two(size);
+
+ seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
+ if (!seq_list->list)
+ return -ENOMEM;
+
+ seq_list->mask = alloc_size - 1;
+ seq_list->head = L2CAP_SEQ_LIST_CLEAR;
+ seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
+ for (i = 0; i < alloc_size; i++)
+ seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
+
+ return 0;
+}
+
+static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
+{
+ kfree(seq_list->list);
+}
+
+static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
+ u16 seq)
+{
+ /* Constant-time check for list membership */
+ return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
+}
+
+static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
+{
+ u16 mask = seq_list->mask;
+
+ if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
+ /* In case someone tries to pop the head of an empty list */
+ return L2CAP_SEQ_LIST_CLEAR;
+ } else if (seq_list->head == seq) {
+ /* Head can be removed in constant time */
+ seq_list->head = seq_list->list[seq & mask];
+ seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
+
+ if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
+ seq_list->head = L2CAP_SEQ_LIST_CLEAR;
+ seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
+ }
+ } else {
+ /* Walk the list to find the sequence number */
+ u16 prev = seq_list->head;
+ while (seq_list->list[prev & mask] != seq) {
+ prev = seq_list->list[prev & mask];
+ if (prev == L2CAP_SEQ_LIST_TAIL)
+ return L2CAP_SEQ_LIST_CLEAR;
+ }
+
+ /* Unlink the number from the list and clear it */
+ seq_list->list[prev & mask] = seq_list->list[seq & mask];
+ seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
+ if (seq_list->tail == seq)
+ seq_list->tail = prev;
+ }
+ return seq;
+}
+
+static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
+{
+ /* Remove the head in constant time */
+ return l2cap_seq_list_remove(seq_list, seq_list->head);
+}
+
+static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
+{
+ u16 i;
+
+ if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
+ return;
+
+ for (i = 0; i <= seq_list->mask; i++)
+ seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
+
+ seq_list->head = L2CAP_SEQ_LIST_CLEAR;
+ seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
+}
+
+static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
+{
+ u16 mask = seq_list->mask;
+
+ /* All appends happen in constant time */
+
+ if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
+ return;
+
+ if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
+ seq_list->head = seq;
+ else
+ seq_list->list[seq_list->tail & mask] = seq;
+
+ seq_list->tail = seq;
+ seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
+}
+
static void l2cap_chan_timeout(struct work_struct *work)
{
struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
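
The l2cap_seq_list helpers added above store ERTM sequence numbers as a singly linked list embedded in a power-of-two sized array, so membership tests, tail appends and head pops are all constant time. A compact userspace model of the structure, assuming a fixed 16-slot window and the same CLEAR/TAIL sentinel behaviour as the patch:

#include <stdio.h>

#define SEQ_CLEAR 0xffff
#define SEQ_TAIL  0xfffe
#define MASK      15		/* 16 slots, a power of two as in the patch */

static unsigned short list[MASK + 1];
static unsigned short head = SEQ_CLEAR, tail = SEQ_CLEAR;

static void seq_init(void)
{
	int i;
	for (i = 0; i <= MASK; i++)
		list[i] = SEQ_CLEAR;
}

static int seq_contains(unsigned short seq)
{
	return list[seq & MASK] != SEQ_CLEAR;	/* O(1) membership check */
}

static void seq_append(unsigned short seq)
{
	if (list[seq & MASK] != SEQ_CLEAR)
		return;				/* already queued */
	if (tail == SEQ_CLEAR)
		head = seq;			/* first element */
	else
		list[tail & MASK] = seq;	/* link previous tail to us */
	tail = seq;
	list[seq & MASK] = SEQ_TAIL;
}

static unsigned short seq_pop(void)
{
	unsigned short seq = head;

	if (head == SEQ_CLEAR)
		return SEQ_CLEAR;		/* empty list */
	head = list[seq & MASK];
	list[seq & MASK] = SEQ_CLEAR;
	if (head == SEQ_TAIL)
		head = tail = SEQ_CLEAR;	/* that was the last element */
	return seq;
}

int main(void)
{
	seq_init();
	seq_append(5);
	seq_append(9);
	printf("contains 9: %d\n", seq_contains(9));	/* 1 */
	printf("pop: %u\n", seq_pop());			/* 5 */
	printf("pop: %u\n", seq_pop());			/* 9 */
	printf("pop: %u\n", seq_pop());			/* 65535 == SEQ_CLEAR (empty) */
	return 0;
}
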
@@ -262,7 +372,7 @@ static void l2cap_chan_timeout(struct work_struct *work)
l2cap_chan_put(chan);
}
-struct l2cap_chan *l2cap_chan_create(struct sock *sk)
+struct l2cap_chan *l2cap_chan_create(void)
{
struct l2cap_chan *chan;
@@ -272,8 +382,6 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk)
mutex_init(&chan->lock);
- chan->sk = sk;
-
write_lock(&chan_list_lock);
list_add(&chan->global_l, &chan_list);
write_unlock(&chan_list_lock);
@@ -284,7 +392,7 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk)
atomic_set(&chan->refcnt, 1);
- BT_DBG("sk %p chan %p", sk, chan);
+ BT_DBG("chan %p", chan);
return chan;
}
@@ -298,10 +406,21 @@ void l2cap_chan_destroy(struct l2cap_chan *chan)
l2cap_chan_put(chan);
}
-void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+void l2cap_chan_set_defaults(struct l2cap_chan *chan)
+{
+ chan->fcs = L2CAP_FCS_CRC16;
+ chan->max_tx = L2CAP_DEFAULT_MAX_TX;
+ chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
+ chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
+ chan->sec_level = BT_SECURITY_LOW;
+
+ set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+}
+
+static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
- chan->psm, chan->dcid);
+ __le16_to_cpu(chan->psm), chan->dcid);
conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
@@ -347,7 +466,7 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
list_add(&chan->list, &conn->chan_l);
}
-void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
mutex_lock(&conn->chan_lock);
__l2cap_chan_add(conn, chan);
@@ -405,6 +524,8 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)
skb_queue_purge(&chan->srej_q);
+ l2cap_seq_list_free(&chan->srej_list);
+ l2cap_seq_list_free(&chan->retrans_list);
list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
list_del(&l->list);
kfree(l);
@@ -453,7 +574,6 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
case BT_CONFIG:
if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
conn->hcon->type == ACL_LINK) {
- __clear_chan_timer(chan);
__set_chan_timer(chan, sk->sk_sndtimeo);
l2cap_send_disconn_req(conn, chan, reason);
} else
@@ -466,7 +586,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
struct l2cap_conn_rsp rsp;
__u16 result;
- if (bt_sk(sk)->defer_setup)
+ if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
result = L2CAP_CR_SEC_BLOCK;
else
result = L2CAP_CR_BAD_PSM;
@@ -599,6 +719,117 @@ static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
hci_send_acl(chan->conn->hchan, skb, flags);
}
+static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
+{
+ control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
+ control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
+
+ if (enh & L2CAP_CTRL_FRAME_TYPE) {
+ /* S-Frame */
+ control->sframe = 1;
+ control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
+ control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
+
+ control->sar = 0;
+ control->txseq = 0;
+ } else {
+ /* I-Frame */
+ control->sframe = 0;
+ control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
+ control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
+
+ control->poll = 0;
+ control->super = 0;
+ }
+}
+
+static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
+{
+ control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
+ control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
+
+ if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
+ /* S-Frame */
+ control->sframe = 1;
+ control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
+ control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
+
+ control->sar = 0;
+ control->txseq = 0;
+ } else {
+ /* I-Frame */
+ control->sframe = 0;
+ control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
+ control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
+
+ control->poll = 0;
+ control->super = 0;
+ }
+}
+
+static inline void __unpack_control(struct l2cap_chan *chan,
+ struct sk_buff *skb)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
+ __unpack_extended_control(get_unaligned_le32(skb->data),
+ &bt_cb(skb)->control);
+ } else {
+ __unpack_enhanced_control(get_unaligned_le16(skb->data),
+ &bt_cb(skb)->control);
+ }
+}
+
+static u32 __pack_extended_control(struct l2cap_ctrl *control)
+{
+ u32 packed;
+
+ packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
+ packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
+
+ if (control->sframe) {
+ packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
+ packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
+ packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
+ } else {
+ packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
+ packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
+ }
+
+ return packed;
+}
+
+static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
+{
+ u16 packed;
+
+ packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
+ packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
+
+ if (control->sframe) {
+ packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
+ packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
+ packed |= L2CAP_CTRL_FRAME_TYPE;
+ } else {
+ packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
+ packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
+ }
+
+ return packed;
+}
+
+static inline void __pack_control(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control,
+ struct sk_buff *skb)
+{
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
+ put_unaligned_le32(__pack_extended_control(control),
+ skb->data + L2CAP_HDR_SIZE);
+ } else {
+ put_unaligned_le16(__pack_enhanced_control(control),
+ skb->data + L2CAP_HDR_SIZE);
+ }
+}
+
static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
{
struct sk_buff *skb;
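
The __pack_*/__unpack_* helpers above translate between struct l2cap_ctrl and the on-air enhanced (16-bit) or extended (32-bit) ERTM control fields. A reduced round-trip sketch for the enhanced field follows; the masks and shifts mimic the usual enhanced control layout but should be read as illustrative stand-ins for the real L2CAP_CTRL_* defines:

#include <stdio.h>
#include <stdint.h>

/* Illustrative bit layout of the enhanced (16-bit) control field. */
#define CTRL_FRAME_TYPE   0x0001
#define CTRL_TXSEQ_SHIFT  1
#define CTRL_TXSEQ        0x007e
#define CTRL_SUPER_SHIFT  2
#define CTRL_SUPERVISE    0x000c
#define CTRL_POLL_SHIFT   4
#define CTRL_POLL         0x0010
#define CTRL_FINAL_SHIFT  7
#define CTRL_FINAL        0x0080
#define CTRL_REQSEQ_SHIFT 8
#define CTRL_REQSEQ       0x3f00
#define CTRL_SAR_SHIFT    14
#define CTRL_SAR          0xc000

struct ctrl {
	unsigned sframe:1, sar:2, super:2, poll:1, final:1;
	unsigned txseq:6, reqseq:6;
};

static uint16_t pack_enhanced(const struct ctrl *c)
{
	uint16_t packed = (c->reqseq << CTRL_REQSEQ_SHIFT) |
			  (c->final << CTRL_FINAL_SHIFT);

	if (c->sframe)
		packed |= (c->poll << CTRL_POLL_SHIFT) |
			  (c->super << CTRL_SUPER_SHIFT) | CTRL_FRAME_TYPE;
	else
		packed |= (c->sar << CTRL_SAR_SHIFT) |
			  (c->txseq << CTRL_TXSEQ_SHIFT);
	return packed;
}

static void unpack_enhanced(uint16_t enh, struct ctrl *c)
{
	c->reqseq = (enh & CTRL_REQSEQ) >> CTRL_REQSEQ_SHIFT;
	c->final  = (enh & CTRL_FINAL) >> CTRL_FINAL_SHIFT;
	c->sframe = enh & CTRL_FRAME_TYPE;
	if (c->sframe) {
		c->poll  = (enh & CTRL_POLL) >> CTRL_POLL_SHIFT;
		c->super = (enh & CTRL_SUPERVISE) >> CTRL_SUPER_SHIFT;
		c->sar = c->txseq = 0;
	} else {
		c->sar   = (enh & CTRL_SAR) >> CTRL_SAR_SHIFT;
		c->txseq = (enh & CTRL_TXSEQ) >> CTRL_TXSEQ_SHIFT;
		c->poll = c->super = 0;
	}
}

int main(void)
{
	struct ctrl in = { .sframe = 0, .sar = 1, .txseq = 42, .reqseq = 7 }, out;
	uint16_t wire = pack_enhanced(&in);

	unpack_enhanced(wire, &out);
	printf("wire 0x%04x txseq %u reqseq %u sar %u\n",
	       wire, out.txseq, out.reqseq, out.sar);
	return 0;
}
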
@@ -681,10 +912,38 @@ static void l2cap_send_conn_req(struct l2cap_chan *chan)
l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
+static void l2cap_chan_ready(struct l2cap_chan *chan)
+{
+ struct sock *sk = chan->sk;
+ struct sock *parent;
+
+ lock_sock(sk);
+
+ parent = bt_sk(sk)->parent;
+
+ BT_DBG("sk %p, parent %p", sk, parent);
+
+ chan->conf_state = 0;
+ __clear_chan_timer(chan);
+
+ __l2cap_state_change(chan, BT_CONNECTED);
+ sk->sk_state_change(sk);
+
+ if (parent)
+ parent->sk_data_ready(parent, 0);
+
+ release_sock(sk);
+}
+
static void l2cap_do_start(struct l2cap_chan *chan)
{
struct l2cap_conn *conn = chan->conn;
+ if (conn->hcon->type == LE_LINK) {
+ l2cap_chan_ready(chan);
+ return;
+ }
+
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
return;
@@ -791,7 +1050,8 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
if (l2cap_chan_check_security(chan)) {
lock_sock(sk);
- if (bt_sk(sk)->defer_setup) {
+ if (test_bit(BT_SK_DEFER_SETUP,
+ &bt_sk(sk)->flags)) {
struct sock *parent = bt_sk(sk)->parent;
rsp.result = cpu_to_le16(L2CAP_CR_PEND);
rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
@@ -830,10 +1090,12 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
mutex_unlock(&conn->chan_lock);
}
-/* Find socket with cid and source bdaddr.
+/* Find socket with cid and source/destination bdaddr.
* Returns closest match, locked.
*/
-static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
+static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
+ bdaddr_t *src,
+ bdaddr_t *dst)
{
struct l2cap_chan *c, *c1 = NULL;
@@ -846,14 +1108,22 @@ static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdadd
continue;
if (c->scid == cid) {
+ int src_match, dst_match;
+ int src_any, dst_any;
+
/* Exact match. */
- if (!bacmp(&bt_sk(sk)->src, src)) {
+ src_match = !bacmp(&bt_sk(sk)->src, src);
+ dst_match = !bacmp(&bt_sk(sk)->dst, dst);
+ if (src_match && dst_match) {
read_unlock(&chan_list_lock);
return c;
}
/* Closest match */
- if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
+ src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
+ dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
+ if ((src_match && dst_any) || (src_any && dst_match) ||
+ (src_any && dst_any))
c1 = c;
}
}
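
l2cap_global_chan_by_scid() (and l2cap_global_chan_by_psm(), further down) now rank listening channels by how well both source and destination addresses match: an exact src+dst match wins outright, while combinations involving BDADDR_ANY are kept as the closest match. A small sketch of that ranking, with addresses modelled as strings and "*" standing in for BDADDR_ANY (purely illustrative):

#include <stdio.h>
#include <string.h>

#define ANY_ADDR "*"

struct listener {
	const char *src, *dst;
};

static int addr_match(const char *a, const char *b)
{
	return strcmp(a, b) == 0;
}

/* Return the best listener for a given src/dst pair: exact match first,
 * otherwise the last listener where every non-matching side is a wildcard.
 */
static const struct listener *best_match(const struct listener *l, int n,
					 const char *src, const char *dst)
{
	const struct listener *closest = NULL;
	int i;

	for (i = 0; i < n; i++) {
		int src_match = addr_match(l[i].src, src);
		int dst_match = addr_match(l[i].dst, dst);
		int src_any = addr_match(l[i].src, ANY_ADDR);
		int dst_any = addr_match(l[i].dst, ANY_ADDR);

		if (src_match && dst_match)
			return &l[i];		/* exact match */

		if ((src_match && dst_any) || (src_any && dst_match) ||
		    (src_any && dst_any))
			closest = &l[i];	/* closest match so far */
	}

	return closest;
}

int main(void)
{
	struct listener listeners[] = {
		{ "*", "*" },
		{ "AA", "*" },
		{ "AA", "BB" },
	};
	const struct listener *m = best_match(listeners, 3, "AA", "BB");

	printf("picked %s -> %s\n", m->src, m->dst);	/* AA -> BB */
	return 0;
}
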
@@ -872,7 +1142,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
/* Check if we have socket listening on cid */
pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
- conn->src);
+ conn->src, conn->dst);
if (!pchan)
return;
@@ -910,29 +1180,6 @@ clean:
release_sock(parent);
}
-static void l2cap_chan_ready(struct l2cap_chan *chan)
-{
- struct sock *sk = chan->sk;
- struct sock *parent;
-
- lock_sock(sk);
-
- parent = bt_sk(sk)->parent;
-
- BT_DBG("sk %p, parent %p", sk, parent);
-
- chan->conf_state = 0;
- __clear_chan_timer(chan);
-
- __l2cap_state_change(chan, BT_CONNECTED);
- sk->sk_state_change(sk);
-
- if (parent)
- parent->sk_data_ready(parent, 0);
-
- release_sock(sk);
-}
-
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
struct l2cap_chan *chan;
@@ -1016,6 +1263,7 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
/* Kill channels */
list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
+ l2cap_chan_hold(chan);
l2cap_chan_lock(chan);
l2cap_chan_del(chan, err);
@@ -1023,6 +1271,7 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
l2cap_chan_unlock(chan);
chan->ops->close(chan->data);
+ l2cap_chan_put(chan);
}
mutex_unlock(&conn->chan_lock);
@@ -1100,10 +1349,12 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
/* ---- Socket interface ---- */
-/* Find socket with psm and source bdaddr.
+/* Find socket with psm and source / destination bdaddr.
* Returns closest match.
*/
-static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
+static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
+ bdaddr_t *src,
+ bdaddr_t *dst)
{
struct l2cap_chan *c, *c1 = NULL;
@@ -1116,14 +1367,22 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr
continue;
if (c->psm == psm) {
+ int src_match, dst_match;
+ int src_any, dst_any;
+
/* Exact match. */
- if (!bacmp(&bt_sk(sk)->src, src)) {
+ src_match = !bacmp(&bt_sk(sk)->src, src);
+ dst_match = !bacmp(&bt_sk(sk)->dst, dst);
+ if (src_match && dst_match) {
read_unlock(&chan_list_lock);
return c;
}
/* Closest match */
- if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
+ src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
+ dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
+ if ((src_match && dst_any) || (src_any && dst_match) ||
+ (src_any && dst_any))
c1 = c;
}
}
@@ -1133,7 +1392,8 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr
return c1;
}
-int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
+int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
+ bdaddr_t *dst, u8 dst_type)
{
struct sock *sk = chan->sk;
bdaddr_t *src = &bt_sk(sk)->src;
@@ -1143,8 +1403,8 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d
__u8 auth_type;
int err;
- BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
- chan->psm);
+ BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
+ dst_type, __le16_to_cpu(chan->psm));
hdev = hci_get_route(dst, src);
if (!hdev)
@@ -1218,11 +1478,11 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d
auth_type = l2cap_get_auth_type(chan);
if (chan->dcid == L2CAP_CID_LE_DATA)
- hcon = hci_connect(hdev, LE_LINK, dst,
- chan->sec_level, auth_type);
+ hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
+ chan->sec_level, auth_type);
else
- hcon = hci_connect(hdev, ACL_LINK, dst,
- chan->sec_level, auth_type);
+ hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
+ chan->sec_level, auth_type);
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
@@ -1236,6 +1496,18 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *d
goto done;
}
+ if (hcon->type == LE_LINK) {
+ err = 0;
+
+ if (!list_empty(&conn->chan_l)) {
+ err = -EBUSY;
+ hci_conn_put(hcon);
+ }
+
+ if (err)
+ goto done;
+ }
+
/* Update source addr of the socket */
bacpy(src, conn->src);
@@ -1346,7 +1618,7 @@ static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
while ((skb = skb_peek(&chan->tx_q)) &&
chan->unacked_frames) {
- if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
+ if (bt_cb(skb)->control.txseq == chan->expected_ack_seq)
break;
skb = skb_dequeue(&chan->tx_q);
@@ -1368,6 +1640,7 @@ static void l2cap_streaming_send(struct l2cap_chan *chan)
while ((skb = skb_dequeue(&chan->tx_q))) {
control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
control |= __set_txseq(chan, chan->next_tx_seq);
+ control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
__put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
if (chan->fcs == L2CAP_FCS_CRC16) {
@@ -1393,21 +1666,21 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
if (!skb)
return;
- while (bt_cb(skb)->tx_seq != tx_seq) {
+ while (bt_cb(skb)->control.txseq != tx_seq) {
if (skb_queue_is_last(&chan->tx_q, skb))
return;
skb = skb_queue_next(&chan->tx_q, skb);
}
- if (chan->remote_max_tx &&
- bt_cb(skb)->retries == chan->remote_max_tx) {
+ if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
+ chan->remote_max_tx) {
l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
return;
}
tx_skb = skb_clone(skb, GFP_ATOMIC);
- bt_cb(skb)->retries++;
+ bt_cb(skb)->control.retries++;
control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
control &= __get_sar_mask(chan);
@@ -1440,17 +1713,20 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
if (chan->state != BT_CONNECTED)
return -ENOTCONN;
+ if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
+ return 0;
+
while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
- if (chan->remote_max_tx &&
- bt_cb(skb)->retries == chan->remote_max_tx) {
+ if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
+ chan->remote_max_tx) {
l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
break;
}
tx_skb = skb_clone(skb, GFP_ATOMIC);
- bt_cb(skb)->retries++;
+ bt_cb(skb)->control.retries++;
control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
control &= __get_sar_mask(chan);
@@ -1460,6 +1736,7 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
control |= __set_reqseq(chan, chan->buffer_seq);
control |= __set_txseq(chan, chan->next_tx_seq);
+ control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
__put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
@@ -1474,11 +1751,11 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
__set_retrans_timer(chan);
- bt_cb(skb)->tx_seq = chan->next_tx_seq;
+ bt_cb(skb)->control.txseq = chan->next_tx_seq;
chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
- if (bt_cb(skb)->retries == 1) {
+ if (bt_cb(skb)->control.retries == 1) {
chan->unacked_frames++;
if (!nsent++)
@@ -1554,7 +1831,7 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
{
struct l2cap_conn *conn = chan->conn;
struct sk_buff **frag;
- int err, sent = 0;
+ int sent = 0;
if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
return -EFAULT;
@@ -1565,14 +1842,17 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
/* Continuation fragments (no L2CAP header) */
frag = &skb_shinfo(skb)->frag_list;
while (len) {
+ struct sk_buff *tmp;
+
count = min_t(unsigned int, conn->mtu, len);
- *frag = chan->ops->alloc_skb(chan, count,
- msg->msg_flags & MSG_DONTWAIT,
- &err);
+ tmp = chan->ops->alloc_skb(chan, count,
+ msg->msg_flags & MSG_DONTWAIT);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+
+ *frag = tmp;
- if (!*frag)
- return err;
if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
return -EFAULT;
@@ -1581,6 +1861,9 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
sent += count;
len -= count;
+ skb->len += (*frag)->len;
+ skb->data_len += (*frag)->len;
+
frag = &(*frag)->next;
}
@@ -1601,18 +1884,17 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
count = min_t(unsigned int, (conn->mtu - hlen), len);
skb = chan->ops->alloc_skb(chan, count + hlen,
- msg->msg_flags & MSG_DONTWAIT, &err);
-
- if (!skb)
- return ERR_PTR(err);
+ msg->msg_flags & MSG_DONTWAIT);
+ if (IS_ERR(skb))
+ return skb;
skb->priority = priority;
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
- lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
- put_unaligned_le16(chan->psm, skb_put(skb, 2));
+ lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
+ put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
if (unlikely(err < 0)) {
@@ -1628,25 +1910,24 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
{
struct l2cap_conn *conn = chan->conn;
struct sk_buff *skb;
- int err, count, hlen = L2CAP_HDR_SIZE;
+ int err, count;
struct l2cap_hdr *lh;
BT_DBG("chan %p len %d", chan, (int)len);
- count = min_t(unsigned int, (conn->mtu - hlen), len);
+ count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
- skb = chan->ops->alloc_skb(chan, count + hlen,
- msg->msg_flags & MSG_DONTWAIT, &err);
-
- if (!skb)
- return ERR_PTR(err);
+ skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
+ msg->msg_flags & MSG_DONTWAIT);
+ if (IS_ERR(skb))
+ return skb;
skb->priority = priority;
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
- lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
+ lh->len = cpu_to_le16(len);
err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
if (unlikely(err < 0)) {
@@ -1658,7 +1939,7 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
struct msghdr *msg, size_t len,
- u32 control, u16 sdulen)
+ u16 sdulen)
{
struct l2cap_conn *conn = chan->conn;
struct sk_buff *skb;
@@ -1684,17 +1965,16 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
count = min_t(unsigned int, (conn->mtu - hlen), len);
skb = chan->ops->alloc_skb(chan, count + hlen,
- msg->msg_flags & MSG_DONTWAIT, &err);
-
- if (!skb)
- return ERR_PTR(err);
+ msg->msg_flags & MSG_DONTWAIT);
+ if (IS_ERR(skb))
+ return skb;
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
- __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
+ __put_control(chan, 0, skb_put(skb, __ctrl_size(chan)));
if (sdulen)
put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
@@ -1708,61 +1988,82 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
if (chan->fcs == L2CAP_FCS_CRC16)
put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
- bt_cb(skb)->retries = 0;
+ bt_cb(skb)->control.retries = 0;
return skb;
}
-static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+static int l2cap_segment_sdu(struct l2cap_chan *chan,
+ struct sk_buff_head *seg_queue,
+ struct msghdr *msg, size_t len)
{
struct sk_buff *skb;
- struct sk_buff_head sar_queue;
- u32 control;
- size_t size = 0;
+ u16 sdu_len;
+ size_t pdu_len;
+ int err = 0;
+ u8 sar;
- skb_queue_head_init(&sar_queue);
- control = __set_ctrl_sar(chan, L2CAP_SAR_START);
- skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
- __skb_queue_tail(&sar_queue, skb);
- len -= chan->remote_mps;
- size += chan->remote_mps;
+ /* It is critical that ERTM PDUs fit in a single HCI fragment,
+ * so fragmented skbs are not used. The HCI layer's handling
+ * of fragmented skbs is not compatible with ERTM's queueing.
+ */
- while (len > 0) {
- size_t buflen;
+ /* PDU size is derived from the HCI MTU */
+ pdu_len = chan->conn->mtu;
- if (len > chan->remote_mps) {
- control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
- buflen = chan->remote_mps;
- } else {
- control = __set_ctrl_sar(chan, L2CAP_SAR_END);
- buflen = len;
- }
+ pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
+
+ /* Adjust for largest possible L2CAP overhead. */
+ pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;
+
+ /* Remote device may have requested smaller PDUs */
+ pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
+
+ if (len <= pdu_len) {
+ sar = L2CAP_SAR_UNSEGMENTED;
+ sdu_len = 0;
+ pdu_len = len;
+ } else {
+ sar = L2CAP_SAR_START;
+ sdu_len = len;
+ pdu_len -= L2CAP_SDULEN_SIZE;
+ }
+
+ while (len > 0) {
+ skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
- skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
if (IS_ERR(skb)) {
- skb_queue_purge(&sar_queue);
+ __skb_queue_purge(seg_queue);
return PTR_ERR(skb);
}
- __skb_queue_tail(&sar_queue, skb);
- len -= buflen;
- size += buflen;
+ bt_cb(skb)->control.sar = sar;
+ __skb_queue_tail(seg_queue, skb);
+
+ len -= pdu_len;
+ if (sdu_len) {
+ sdu_len = 0;
+ pdu_len += L2CAP_SDULEN_SIZE;
+ }
+
+ if (len <= pdu_len) {
+ sar = L2CAP_SAR_END;
+ pdu_len = len;
+ } else {
+ sar = L2CAP_SAR_CONTINUE;
+ }
}
- skb_queue_splice_tail(&sar_queue, &chan->tx_q);
- if (chan->tx_send_head == NULL)
- chan->tx_send_head = sar_queue.next;
- return size;
+ return err;
}
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
u32 priority)
{
struct sk_buff *skb;
- u32 control;
int err;
+ struct sk_buff_head seg_queue;
/* Connectionless channel */
if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
@@ -1791,42 +2092,47 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
case L2CAP_MODE_ERTM:
case L2CAP_MODE_STREAMING:
- /* Entire SDU fits into one PDU */
- if (len <= chan->remote_mps) {
- control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
- skb = l2cap_create_iframe_pdu(chan, msg, len, control,
- 0);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ /* Check outgoing MTU */
+ if (len > chan->omtu) {
+ err = -EMSGSIZE;
+ break;
+ }
- __skb_queue_tail(&chan->tx_q, skb);
+ __skb_queue_head_init(&seg_queue);
- if (chan->tx_send_head == NULL)
- chan->tx_send_head = skb;
+ /* Do segmentation before calling in to the state machine,
+ * since it's possible to block while waiting for memory
+ * allocation.
+ */
+ err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
- } else {
- /* Segment SDU into multiples PDUs */
- err = l2cap_sar_segment_sdu(chan, msg, len);
- if (err < 0)
- return err;
+ /* The channel could have been closed while segmenting,
+ * check that it is still connected.
+ */
+ if (chan->state != BT_CONNECTED) {
+ __skb_queue_purge(&seg_queue);
+ err = -ENOTCONN;
}
- if (chan->mode == L2CAP_MODE_STREAMING) {
- l2cap_streaming_send(chan);
- err = len;
+ if (err)
break;
- }
- if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
- test_bit(CONN_WAIT_F, &chan->conn_state)) {
- err = len;
- break;
- }
+ if (chan->mode == L2CAP_MODE_ERTM && chan->tx_send_head == NULL)
+ chan->tx_send_head = seg_queue.next;
+ skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
+
+ if (chan->mode == L2CAP_MODE_ERTM)
+ err = l2cap_ertm_send(chan);
+ else
+ l2cap_streaming_send(chan);
- err = l2cap_ertm_send(chan);
if (err >= 0)
err = len;
+ /* If the skbs were not queued for sending, they'll still be in
+ * seg_queue and need to be purged.
+ */
+ __skb_queue_purge(&seg_queue);
break;
default:
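
The segmentation rework in the hunk above (l2cap_segment_sdu) sizes ERTM/streaming PDUs from the HCI MTU, reserves worst-case L2CAP overhead, caps the result at the remote MPS, and lets only the first PDU of a segmented SDU carry the 2-byte SDU length. A standalone sketch of that sizing walk; the constants are illustrative stand-ins for L2CAP_BREDR_MAX_PAYLOAD, L2CAP_EXT_HDR_SIZE, L2CAP_FCS_SIZE and L2CAP_SDULEN_SIZE:

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel constants. */
#define EXT_HDR_SIZE  8		/* basic header plus extended control */
#define FCS_SIZE      2
#define SDULEN_SIZE   2
#define BREDR_MAX     1019

enum sar { SAR_UNSEG, SAR_START, SAR_CONT, SAR_END };

static void segment_sdu(size_t hci_mtu, size_t remote_mps, size_t len)
{
	size_t pdu_len = hci_mtu;
	enum sar sar;

	if (pdu_len > BREDR_MAX)
		pdu_len = BREDR_MAX;
	pdu_len -= EXT_HDR_SIZE + FCS_SIZE;	/* worst-case L2CAP overhead */
	if (pdu_len > remote_mps)
		pdu_len = remote_mps;		/* honour the remote's MPS */

	if (len <= pdu_len) {
		sar = SAR_UNSEG;
	} else {
		sar = SAR_START;
		pdu_len -= SDULEN_SIZE;		/* first PDU carries the SDU length */
	}

	while (len > 0) {
		size_t chunk = len < pdu_len ? len : pdu_len;

		printf("PDU: sar=%d payload=%zu\n", sar, chunk);
		len -= chunk;

		if (sar == SAR_START)
			pdu_len += SDULEN_SIZE;	/* later PDUs have no SDU length */

		sar = len <= pdu_len ? SAR_END : SAR_CONT;
	}
}

int main(void)
{
	/* e.g. 672-byte HCI MTU, remote MPS 498, 1000-byte SDU:
	 * yields a 496-byte START, a 498-byte CONTINUE and a 6-byte END PDU.
	 */
	segment_sdu(672, 498, 1000);
	return 0;
}
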
@@ -2040,13 +2346,29 @@ static void l2cap_ack_timeout(struct work_struct *work)
l2cap_chan_put(chan);
}
-static inline void l2cap_ertm_init(struct l2cap_chan *chan)
+static inline int l2cap_ertm_init(struct l2cap_chan *chan)
{
+ int err;
+
+ chan->next_tx_seq = 0;
+ chan->expected_tx_seq = 0;
chan->expected_ack_seq = 0;
chan->unacked_frames = 0;
chan->buffer_seq = 0;
chan->num_acked = 0;
chan->frames_sent = 0;
+ chan->last_acked_seq = 0;
+ chan->sdu = NULL;
+ chan->sdu_last_frag = NULL;
+ chan->sdu_len = 0;
+
+ skb_queue_head_init(&chan->tx_q);
+
+ if (chan->mode != L2CAP_MODE_ERTM)
+ return 0;
+
+ chan->rx_state = L2CAP_RX_STATE_RECV;
+ chan->tx_state = L2CAP_TX_STATE_XMIT;
INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
@@ -2055,6 +2377,11 @@ static inline void l2cap_ertm_init(struct l2cap_chan *chan)
skb_queue_head_init(&chan->srej_q);
INIT_LIST_HEAD(&chan->srej_l);
+ err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
+ if (err < 0)
+ return err;
+
+ return l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
}
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
@@ -2378,9 +2705,9 @@ done:
chan->remote_mps = size;
rfc.retrans_timeout =
- le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
+ __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
rfc.monitor_timeout =
- le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
+ __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
set_bit(CONF_MODE_DONE, &chan->conf_state);
@@ -2644,10 +2971,10 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
u16 dcid = 0, scid = __le16_to_cpu(req->scid);
__le16 psm = req->psm;
- BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
+ BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
/* Check if we have socket listening on psm */
- pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
+ pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
if (!pchan) {
result = L2CAP_CR_BAD_PSM;
goto sendresp;
@@ -2706,7 +3033,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
if (l2cap_chan_check_security(chan)) {
- if (bt_sk(sk)->defer_setup) {
+ if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
__l2cap_state_change(chan, BT_CONNECT2);
result = L2CAP_CR_PEND;
status = L2CAP_CS_AUTHOR_PEND;
@@ -2848,7 +3175,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
u16 dcid, flags;
u8 rsp[64];
struct l2cap_chan *chan;
- int len;
+ int len, err = 0;
dcid = __le16_to_cpu(req->dcid);
flags = __le16_to_cpu(req->flags);
@@ -2859,8 +3186,6 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
if (!chan)
return -ENOENT;
- l2cap_chan_lock(chan);
-
if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
struct l2cap_cmd_rej_cid rej;
@@ -2915,13 +3240,15 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
l2cap_state_change(chan, BT_CONNECTED);
- chan->next_tx_seq = 0;
- chan->expected_tx_seq = 0;
- skb_queue_head_init(&chan->tx_q);
- if (chan->mode == L2CAP_MODE_ERTM)
- l2cap_ertm_init(chan);
+ if (chan->mode == L2CAP_MODE_ERTM ||
+ chan->mode == L2CAP_MODE_STREAMING)
+ err = l2cap_ertm_init(chan);
+
+ if (err < 0)
+ l2cap_send_disconn_req(chan->conn, chan, -err);
+ else
+ l2cap_chan_ready(chan);
- l2cap_chan_ready(chan);
goto unlock;
}
@@ -2949,7 +3276,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
unlock:
l2cap_chan_unlock(chan);
- return 0;
+ return err;
}
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
@@ -2957,21 +3284,20 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
u16 scid, flags, result;
struct l2cap_chan *chan;
- int len = cmd->len - sizeof(*rsp);
+ int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
+ int err = 0;
scid = __le16_to_cpu(rsp->scid);
flags = __le16_to_cpu(rsp->flags);
result = __le16_to_cpu(rsp->result);
- BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
- scid, flags, result);
+ BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
+ result, len);
chan = l2cap_get_chan_by_scid(conn, scid);
if (!chan)
return 0;
- l2cap_chan_lock(chan);
-
switch (result) {
case L2CAP_CONF_SUCCESS:
l2cap_conf_rfc_get(chan, rsp->data, len);
@@ -3045,18 +3371,19 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
set_default_fcs(chan);
l2cap_state_change(chan, BT_CONNECTED);
- chan->next_tx_seq = 0;
- chan->expected_tx_seq = 0;
- skb_queue_head_init(&chan->tx_q);
- if (chan->mode == L2CAP_MODE_ERTM)
- l2cap_ertm_init(chan);
+ if (chan->mode == L2CAP_MODE_ERTM ||
+ chan->mode == L2CAP_MODE_STREAMING)
+ err = l2cap_ertm_init(chan);
- l2cap_chan_ready(chan);
+ if (err < 0)
+ l2cap_send_disconn_req(chan->conn, chan, -err);
+ else
+ l2cap_chan_ready(chan);
}
done:
l2cap_chan_unlock(chan);
- return 0;
+ return err;
}
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
@@ -3092,11 +3419,13 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
sk->sk_shutdown = SHUTDOWN_MASK;
release_sock(sk);
+ l2cap_chan_hold(chan);
l2cap_chan_del(chan, ECONNRESET);
l2cap_chan_unlock(chan);
chan->ops->close(chan->data);
+ l2cap_chan_put(chan);
mutex_unlock(&conn->chan_lock);
@@ -3124,11 +3453,13 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
l2cap_chan_lock(chan);
+ l2cap_chan_hold(chan);
l2cap_chan_del(chan, 0);
l2cap_chan_unlock(chan);
chan->ops->close(chan->data);
+ l2cap_chan_put(chan);
mutex_unlock(&conn->chan_lock);
@@ -3265,8 +3596,8 @@ static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
/* Placeholder: Always reject */
rsp.dcid = 0;
rsp.scid = cpu_to_le16(scid);
- rsp.result = L2CAP_CR_NO_MEM;
- rsp.status = L2CAP_CS_NO_INFO;
+ rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
+ rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
sizeof(rsp), &rsp);
@@ -3665,19 +3996,19 @@ static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb,
struct sk_buff *next_skb;
int tx_seq_offset, next_tx_seq_offset;
- bt_cb(skb)->tx_seq = tx_seq;
- bt_cb(skb)->sar = sar;
+ bt_cb(skb)->control.txseq = tx_seq;
+ bt_cb(skb)->control.sar = sar;
next_skb = skb_peek(&chan->srej_q);
tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
while (next_skb) {
- if (bt_cb(next_skb)->tx_seq == tx_seq)
+ if (bt_cb(next_skb)->control.txseq == tx_seq)
return -EINVAL;
next_tx_seq_offset = __seq_offset(chan,
- bt_cb(next_skb)->tx_seq, chan->buffer_seq);
+ bt_cb(next_skb)->control.txseq, chan->buffer_seq);
if (next_tx_seq_offset > tx_seq_offset) {
__skb_queue_before(&chan->srej_q, next_skb, skb);
@@ -3800,6 +4131,7 @@ static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
BT_DBG("chan %p, Enter local busy", chan);
set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
+ l2cap_seq_list_clear(&chan->srej_list);
__set_ack_timer(chan);
}
@@ -3848,11 +4180,11 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
int err;
- if (bt_cb(skb)->tx_seq != tx_seq)
+ if (bt_cb(skb)->control.txseq != tx_seq)
break;
skb = skb_dequeue(&chan->srej_q);
- control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
+ control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
err = l2cap_reassemble_sdu(chan, skb, control);
if (err < 0) {
@@ -3892,6 +4224,7 @@ static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
while (tx_seq != chan->expected_tx_seq) {
control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
control |= __set_reqseq(chan, chan->expected_tx_seq);
+ l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq);
l2cap_send_sframe(chan, control);
new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
@@ -4022,8 +4355,8 @@ expected:
chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
- bt_cb(skb)->tx_seq = tx_seq;
- bt_cb(skb)->sar = sar;
+ bt_cb(skb)->control.txseq = tx_seq;
+ bt_cb(skb)->control.sar = sar;
__skb_queue_tail(&chan->srej_q, skb);
return 0;
}
@@ -4220,6 +4553,8 @@ static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
u16 req_seq;
int len, next_tx_seq_offset, req_seq_offset;
+ __unpack_control(chan, skb);
+
control = __get_control(chan, skb->data);
skb_pull(skb, __ctrl_size(chan));
len = skb->len;
@@ -4295,8 +4630,6 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
return 0;
}
- l2cap_chan_lock(chan);
-
BT_DBG("chan %p, len %d", chan, skb->len);
if (chan->state != BT_CONNECTED)
@@ -4375,7 +4708,7 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str
{
struct l2cap_chan *chan;
- chan = l2cap_global_chan_by_psm(0, psm, conn->src);
+ chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
if (!chan)
goto drop;
@@ -4396,11 +4729,12 @@ drop:
return 0;
}
-static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
+static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
+ struct sk_buff *skb)
{
struct l2cap_chan *chan;
- chan = l2cap_global_chan_by_scid(0, cid, conn->src);
+ chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
if (!chan)
goto drop;
@@ -4445,7 +4779,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
break;
case L2CAP_CID_CONN_LESS:
- psm = get_unaligned_le16(skb->data);
+ psm = get_unaligned((__le16 *) skb->data);
skb_pull(skb, 2);
l2cap_conless_channel(conn, psm, skb);
break;
@@ -4540,7 +4874,6 @@ static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
if (encrypt == 0x00) {
if (chan->sec_level == BT_SECURITY_MEDIUM) {
- __clear_chan_timer(chan);
__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
} else if (chan->sec_level == BT_SECURITY_HIGH)
l2cap_chan_close(chan, ECONNREFUSED);
@@ -4561,7 +4894,8 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
BT_DBG("conn %p", conn);
if (hcon->type == LE_LINK) {
- smp_distribute_keys(conn, 0);
+ if (!status && encrypt)
+ smp_distribute_keys(conn, 0);
cancel_delayed_work(&conn->security_timer);
}
@@ -4591,7 +4925,7 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
chan->state == BT_CONFIG)) {
struct sock *sk = chan->sk;
- bt_sk(sk)->suspended = false;
+ clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
sk->sk_state_change(sk);
l2cap_check_encryption(chan, encrypt);
@@ -4603,7 +4937,6 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
if (!status) {
l2cap_send_conn_req(chan);
} else {
- __clear_chan_timer(chan);
__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
} else if (chan->state == BT_CONNECT2) {
@@ -4614,7 +4947,8 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
lock_sock(sk);
if (!status) {
- if (bt_sk(sk)->defer_setup) {
+ if (test_bit(BT_SK_DEFER_SETUP,
+ &bt_sk(sk)->flags)) {
struct sock *parent = bt_sk(sk)->parent;
res = L2CAP_CR_PEND;
stat = L2CAP_CS_AUTHOR_PEND;
@@ -4664,8 +4998,6 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
if (!(flags & ACL_CONT)) {
struct l2cap_hdr *hdr;
- struct l2cap_chan *chan;
- u16 cid;
int len;
if (conn->rx_len) {
@@ -4685,7 +5017,6 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
hdr = (struct l2cap_hdr *) skb->data;
len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
- cid = __le16_to_cpu(hdr->cid);
if (len == skb->len) {
/* Complete frame received */
@@ -4702,23 +5033,6 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
goto drop;
}
- chan = l2cap_get_chan_by_scid(conn, cid);
-
- if (chan && chan->sk) {
- struct sock *sk = chan->sk;
- lock_sock(sk);
-
- if (chan->imtu < len - L2CAP_HDR_SIZE) {
- BT_ERR("Frame exceeding recv MTU (len %d, "
- "MTU %d)", len,
- chan->imtu);
- release_sock(sk);
- l2cap_conn_unreliable(conn, ECOMM);
- goto drop;
- }
- release_sock(sk);
- }
-
/* Allocate skb for the complete frame (with header) */
conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
if (!conn->rx_skb)
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 04e7c172d49..3bb1611b9d4 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -124,7 +124,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
return -EINVAL;
err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
- &la.l2_bdaddr);
+ &la.l2_bdaddr, la.l2_bdaddr_type);
if (err)
return err;
@@ -148,12 +148,16 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
lock_sock(sk);
- if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
- || sk->sk_state != BT_BOUND) {
+ if (sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}
+ if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM) {
+ err = -EINVAL;
+ goto done;
+ }
+
switch (chan->mode) {
case L2CAP_MODE_BASIC:
break;
@@ -320,8 +324,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
case L2CAP_CONNINFO:
if (sk->sk_state != BT_CONNECTED &&
- !(sk->sk_state == BT_CONNECT2 &&
- bt_sk(sk)->defer_setup)) {
+ !(sk->sk_state == BT_CONNECT2 &&
+ test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) {
err = -ENOTCONN;
break;
}
@@ -375,7 +379,10 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
}
memset(&sec, 0, sizeof(sec));
- sec.level = chan->sec_level;
+ if (chan->conn)
+ sec.level = chan->conn->hcon->sec_level;
+ else
+ sec.level = chan->sec_level;
if (sk->sk_state == BT_CONNECTED)
sec.key_size = chan->conn->hcon->enc_key_size;
@@ -392,7 +399,8 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
break;
}
- if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
+ if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
+ (u32 __user *) optval))
err = -EFAULT;
break;
@@ -594,10 +602,10 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
/* or for ACL link */
} else if ((sk->sk_state == BT_CONNECT2 &&
- bt_sk(sk)->defer_setup) ||
+ test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) ||
sk->sk_state == BT_CONNECTED) {
if (!l2cap_chan_check_security(chan))
- bt_sk(sk)->suspended = true;
+ set_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
else
sk->sk_state_change(sk);
} else {
@@ -616,7 +624,10 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
break;
}
- bt_sk(sk)->defer_setup = opt;
+ if (opt)
+ set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+ else
+ clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
break;
case BT_FLUSHABLE:
@@ -716,16 +727,13 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
- lock_sock(sk);
-
- if (sk->sk_state != BT_CONNECTED) {
- release_sock(sk);
+ if (sk->sk_state != BT_CONNECTED)
return -ENOTCONN;
- }
+ l2cap_chan_lock(chan);
err = l2cap_chan_send(chan, msg, len, sk->sk_priority);
+ l2cap_chan_unlock(chan);
- release_sock(sk);
return err;
}
@@ -737,7 +745,8 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct ms
lock_sock(sk);
- if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
+ if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP,
+ &bt_sk(sk)->flags)) {
sk->sk_state = BT_CONFIG;
pi->chan->state = BT_CONFIG;
@@ -931,12 +940,19 @@ static void l2cap_sock_state_change_cb(void *data, int state)
}
static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
- unsigned long len, int nb,
- int *err)
+ unsigned long len, int nb)
{
- struct sock *sk = chan->sk;
+ struct sk_buff *skb;
+ int err;
+
+ l2cap_chan_unlock(chan);
+ skb = bt_skb_send_alloc(chan->sk, len, nb, &err);
+ l2cap_chan_lock(chan);
+
+ if (!skb)
+ return ERR_PTR(err);
- return bt_skb_send_alloc(sk, len, nb, err);
+ return skb;
}
static struct l2cap_ops l2cap_chan_ops = {
@@ -952,6 +968,7 @@ static void l2cap_sock_destruct(struct sock *sk)
{
BT_DBG("sk %p", sk);
+ l2cap_chan_put(l2cap_pi(sk)->chan);
if (l2cap_pi(sk)->rx_busy_skb) {
kfree_skb(l2cap_pi(sk)->rx_busy_skb);
l2cap_pi(sk)->rx_busy_skb = NULL;
@@ -972,7 +989,7 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
struct l2cap_chan *pchan = l2cap_pi(parent)->chan;
sk->sk_type = parent->sk_type;
- bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
+ bt_sk(sk)->flags = bt_sk(parent)->flags;
chan->chan_type = pchan->chan_type;
chan->imtu = pchan->imtu;
@@ -1010,13 +1027,8 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
} else {
chan->mode = L2CAP_MODE_BASIC;
}
- chan->max_tx = L2CAP_DEFAULT_MAX_TX;
- chan->fcs = L2CAP_FCS_CRC16;
- chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
- chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
- chan->sec_level = BT_SECURITY_LOW;
- chan->flags = 0;
- set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+
+ l2cap_chan_set_defaults(chan);
}
/* Default config options */
@@ -1052,12 +1064,16 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p
sk->sk_protocol = proto;
sk->sk_state = BT_OPEN;
- chan = l2cap_chan_create(sk);
+ chan = l2cap_chan_create();
if (!chan) {
l2cap_sock_kill(sk);
return NULL;
}
+ l2cap_chan_hold(chan);
+
+ chan->sk = sk;
+
l2cap_pi(sk)->chan = chan;
return sk;
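The socket-level hunks in both l2cap_core.c and l2cap_sock.c express the former defer_setup and suspended booleans as BT_SK_DEFER_SETUP and BT_SK_SUSPEND bits in bt_sk(sk)->flags, toggled with set_bit()/clear_bit() and read with test_bit(). Below is a self-contained userspace sketch of that pattern; the struct, bit numbers and helpers are illustrative stand-ins rather than the kernel definitions.

#include <stdio.h>

enum { BT_SK_DEFER_SETUP, BT_SK_SUSPEND };	/* illustrative bit numbers */

struct bt_sock { unsigned long flags; };

static int  test_bit(int nr, const unsigned long *addr) { return (*addr >> nr) & 1UL; }
static void set_bit(int nr, unsigned long *addr)        { *addr |=  1UL << nr; }
static void clear_bit(int nr, unsigned long *addr)      { *addr &= ~(1UL << nr); }

int main(void)
{
	struct bt_sock sk = { .flags = 0 };
	int opt = 1;				/* e.g. a BT_DEFER_SETUP setsockopt value */

	if (opt)				/* was: bt_sk(sk)->defer_setup = opt */
		set_bit(BT_SK_DEFER_SETUP, &sk.flags);
	else
		clear_bit(BT_SK_DEFER_SETUP, &sk.flags);

	printf("defer_setup=%d suspended=%d\n",
	       test_bit(BT_SK_DEFER_SETUP, &sk.flags),
	       test_bit(BT_SK_SUSPEND, &sk.flags));
	return 0;
}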
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 4bb03b11112..25d22077607 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -35,10 +35,9 @@
#include <net/bluetooth/smp.h>
bool enable_hs;
-bool enable_le;
#define MGMT_VERSION 1
-#define MGMT_REVISION 0
+#define MGMT_REVISION 1
static const u16 mgmt_commands[] = {
MGMT_OP_READ_INDEX_LIST,
@@ -78,6 +77,7 @@ static const u16 mgmt_commands[] = {
MGMT_OP_CONFIRM_NAME,
MGMT_OP_BLOCK_DEVICE,
MGMT_OP_UNBLOCK_DEVICE,
+ MGMT_OP_SET_DEVICE_ID,
};
static const u16 mgmt_events[] = {
@@ -224,7 +224,7 @@ static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
ev = (void *) skb_put(skb, sizeof(*ev));
ev->status = status;
- put_unaligned_le16(cmd, &ev->opcode);
+ ev->opcode = cpu_to_le16(cmd);
err = sock_queue_rcv_skb(sk, skb);
if (err < 0)
@@ -254,7 +254,7 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
- put_unaligned_le16(cmd, &ev->opcode);
+ ev->opcode = cpu_to_le16(cmd);
ev->status = status;
if (rp)
@@ -275,7 +275,7 @@ static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
BT_DBG("sock %p", sk);
rp.version = MGMT_VERSION;
- put_unaligned_le16(MGMT_REVISION, &rp.revision);
+ rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
sizeof(rp));
@@ -285,9 +285,9 @@ static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len)
{
struct mgmt_rp_read_commands *rp;
- u16 num_commands = ARRAY_SIZE(mgmt_commands);
- u16 num_events = ARRAY_SIZE(mgmt_events);
- u16 *opcode;
+ const u16 num_commands = ARRAY_SIZE(mgmt_commands);
+ const u16 num_events = ARRAY_SIZE(mgmt_events);
+ __le16 *opcode;
size_t rp_size;
int i, err;
@@ -299,8 +299,8 @@ static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
if (!rp)
return -ENOMEM;
- put_unaligned_le16(num_commands, &rp->num_commands);
- put_unaligned_le16(num_events, &rp->num_events);
+ rp->num_commands = __constant_cpu_to_le16(num_commands);
+ rp->num_events = __constant_cpu_to_le16(num_events);
for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
put_unaligned_le16(mgmt_commands[i], opcode);
@@ -341,14 +341,14 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
return -ENOMEM;
}
- put_unaligned_le16(count, &rp->num_controllers);
+ rp->num_controllers = cpu_to_le16(count);
i = 0;
list_for_each_entry(d, &hci_dev_list, list) {
if (test_bit(HCI_SETUP, &d->dev_flags))
continue;
- put_unaligned_le16(d->id, &rp->index[i++]);
+ rp->index[i++] = cpu_to_le16(d->id);
BT_DBG("Added hci%u", d->id);
}
@@ -383,10 +383,8 @@ static u32 get_supported_settings(struct hci_dev *hdev)
if (enable_hs)
settings |= MGMT_SETTING_HS;
- if (enable_le) {
- if (hdev->features[4] & LMP_LE)
- settings |= MGMT_SETTING_LE;
- }
+ if (hdev->features[4] & LMP_LE)
+ settings |= MGMT_SETTING_LE;
return settings;
}
@@ -442,9 +440,7 @@ static u16 get_uuid16(u8 *uuid128)
return 0;
}
- memcpy(&val, &uuid128[12], 4);
-
- val = le32_to_cpu(val);
+ val = get_unaligned_le32(&uuid128[12]);
if (val > 0xffff)
return 0;
@@ -479,6 +475,28 @@ static void create_eir(struct hci_dev *hdev, u8 *data)
ptr += (name_len + 2);
}
+ if (hdev->inq_tx_power) {
+ ptr[0] = 2;
+ ptr[1] = EIR_TX_POWER;
+ ptr[2] = (u8) hdev->inq_tx_power;
+
+ eir_len += 3;
+ ptr += 3;
+ }
+
+ if (hdev->devid_source > 0) {
+ ptr[0] = 9;
+ ptr[1] = EIR_DEVICE_ID;
+
+ put_unaligned_le16(hdev->devid_source, ptr + 2);
+ put_unaligned_le16(hdev->devid_vendor, ptr + 4);
+ put_unaligned_le16(hdev->devid_product, ptr + 6);
+ put_unaligned_le16(hdev->devid_version, ptr + 8);
+
+ eir_len += 10;
+ ptr += 10;
+ }
+
memset(uuid16_list, 0, sizeof(uuid16_list));
/* Group all UUID16 types */
@@ -642,8 +660,7 @@ static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
bacpy(&rp.bdaddr, &hdev->bdaddr);
rp.version = hdev->hci_ver;
-
- put_unaligned_le16(hdev->manufacturer, &rp.manufacturer);
+ rp.manufacturer = cpu_to_le16(hdev->manufacturer);
rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
rp.current_settings = cpu_to_le32(get_current_settings(hdev));
@@ -840,7 +857,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
BT_DBG("request for %s", hdev->name);
- timeout = get_unaligned_le16(&cp->timeout);
+ timeout = __le16_to_cpu(cp->timeout);
if (!cp->val && timeout > 0)
return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
MGMT_STATUS_INVALID_PARAMS);
@@ -1122,8 +1139,8 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
}
if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
- MGMT_STATUS_BUSY);
+ err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
+ MGMT_STATUS_BUSY);
goto failed;
}
@@ -1179,7 +1196,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
hci_dev_lock(hdev);
- if (!enable_le || !(hdev->features[4] & LMP_LE)) {
+ if (!(hdev->features[4] & LMP_LE)) {
err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
MGMT_STATUS_NOT_SUPPORTED);
goto unlock;
@@ -1227,10 +1244,8 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
&hci_cp);
- if (err < 0) {
+ if (err < 0)
mgmt_pending_remove(cmd);
- goto unlock;
- }
unlock:
hci_dev_unlock(hdev);
@@ -1280,10 +1295,8 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
}
cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
- if (!cmd) {
+ if (!cmd)
err = -ENOMEM;
- goto failed;
- }
failed:
hci_dev_unlock(hdev);
@@ -1368,10 +1381,8 @@ update_class:
}
cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
- if (!cmd) {
+ if (!cmd)
err = -ENOMEM;
- goto unlock;
- }
unlock:
hci_dev_unlock(hdev);
@@ -1422,10 +1433,8 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
}
cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
- if (!cmd) {
+ if (!cmd)
err = -ENOMEM;
- goto unlock;
- }
unlock:
hci_dev_unlock(hdev);
@@ -1439,7 +1448,7 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
u16 key_count, expected_len;
int i;
- key_count = get_unaligned_le16(&cp->key_count);
+ key_count = __le16_to_cpu(cp->key_count);
expected_len = sizeof(*cp) + key_count *
sizeof(struct mgmt_link_key_info);
@@ -1512,7 +1521,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
- if (cp->addr.type == MGMT_ADDR_BREDR)
+ if (cp->addr.type == BDADDR_BREDR)
err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
else
err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
@@ -1524,7 +1533,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
}
if (cp->disconnect) {
- if (cp->addr.type == MGMT_ADDR_BREDR)
+ if (cp->addr.type == BDADDR_BREDR)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
&cp->addr.bdaddr);
else
@@ -1548,7 +1557,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
- put_unaligned_le16(conn->handle, &dc.handle);
+ dc.handle = cpu_to_le16(conn->handle);
dc.reason = 0x13; /* Remote User Terminated Connection */
err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
if (err < 0)
@@ -1584,7 +1593,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
- if (cp->addr.type == MGMT_ADDR_BREDR)
+ if (cp->addr.type == BDADDR_BREDR)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
else
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
@@ -1601,7 +1610,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
- put_unaligned_le16(conn->handle, &dc.handle);
+ dc.handle = cpu_to_le16(conn->handle);
dc.reason = 0x13; /* Remote User Terminated Connection */
err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
@@ -1613,22 +1622,22 @@ failed:
return err;
}
-static u8 link_to_mgmt(u8 link_type, u8 addr_type)
+static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
switch (link_type) {
case LE_LINK:
switch (addr_type) {
case ADDR_LE_DEV_PUBLIC:
- return MGMT_ADDR_LE_PUBLIC;
- case ADDR_LE_DEV_RANDOM:
- return MGMT_ADDR_LE_RANDOM;
+ return BDADDR_LE_PUBLIC;
+
default:
- return MGMT_ADDR_INVALID;
+ /* Fallback to LE Random address type */
+ return BDADDR_LE_RANDOM;
}
- case ACL_LINK:
- return MGMT_ADDR_BREDR;
+
default:
- return MGMT_ADDR_INVALID;
+ /* Fallback to BR/EDR type */
+ return BDADDR_BREDR;
}
}
@@ -1669,13 +1678,13 @@ static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
continue;
bacpy(&rp->addr[i].bdaddr, &c->dst);
- rp->addr[i].type = link_to_mgmt(c->type, c->dst_type);
- if (rp->addr[i].type == MGMT_ADDR_INVALID)
+ rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
+ if (c->type == SCO_LINK || c->type == ESCO_LINK)
continue;
i++;
}
- put_unaligned_le16(i, &rp->conn_count);
+ rp->conn_count = cpu_to_le16(i);
/* Recalculate length in case of filtered SCO connections, etc */
rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
@@ -1836,7 +1845,7 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status)
struct hci_conn *conn = cmd->user_data;
bacpy(&rp.addr.bdaddr, &conn->dst);
- rp.addr.type = link_to_mgmt(conn->type, conn->dst_type);
+ rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
&rp, sizeof(rp));
@@ -1890,12 +1899,12 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
else
auth_type = HCI_AT_DEDICATED_BONDING_MITM;
- if (cp->addr.type == MGMT_ADDR_BREDR)
- conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr, sec_level,
- auth_type);
+ if (cp->addr.type == BDADDR_BREDR)
+ conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
+ cp->addr.type, sec_level, auth_type);
else
- conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr, sec_level,
- auth_type);
+ conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
+ cp->addr.type, sec_level, auth_type);
memset(&rp, 0, sizeof(rp));
bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
@@ -1923,7 +1932,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
}
/* For LE, just connecting isn't a proof that the pairing finished */
- if (cp->addr.type == MGMT_ADDR_BREDR)
+ if (cp->addr.type == BDADDR_BREDR)
conn->connect_cfm_cb = pairing_complete_cb;
conn->security_cfm_cb = pairing_complete_cb;
@@ -2000,7 +2009,7 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
goto done;
}
- if (type == MGMT_ADDR_BREDR)
+ if (type == BDADDR_BREDR)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
else
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
@@ -2011,7 +2020,7 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
goto done;
}
- if (type == MGMT_ADDR_LE_PUBLIC || type == MGMT_ADDR_LE_RANDOM) {
+ if (type == BDADDR_LE_PUBLIC || type == BDADDR_LE_RANDOM) {
/* Continue with pairing via SMP */
err = smp_user_confirm_reply(conn, mgmt_op, passkey);
@@ -2295,6 +2304,12 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
goto failed;
}
+ if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
+ err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+ MGMT_STATUS_BUSY);
+ goto failed;
+ }
+
if (hdev->discovery.state != DISCOVERY_STOPPED) {
err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
MGMT_STATUS_BUSY);
@@ -2381,27 +2396,39 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
- if (hdev->discovery.state == DISCOVERY_FINDING) {
- err = hci_cancel_inquiry(hdev);
- if (err < 0)
- mgmt_pending_remove(cmd);
+ switch (hdev->discovery.state) {
+ case DISCOVERY_FINDING:
+ if (test_bit(HCI_INQUIRY, &hdev->flags))
+ err = hci_cancel_inquiry(hdev);
else
- hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
- goto unlock;
- }
+ err = hci_cancel_le_scan(hdev);
- e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_PENDING);
- if (!e) {
- mgmt_pending_remove(cmd);
- err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
- &mgmt_cp->type, sizeof(mgmt_cp->type));
- hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
- goto unlock;
+ break;
+
+ case DISCOVERY_RESOLVING:
+ e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
+ NAME_PENDING);
+ if (!e) {
+ mgmt_pending_remove(cmd);
+ err = cmd_complete(sk, hdev->id,
+ MGMT_OP_STOP_DISCOVERY, 0,
+ &mgmt_cp->type,
+ sizeof(mgmt_cp->type));
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+ goto unlock;
+ }
+
+ bacpy(&cp.bdaddr, &e->data.bdaddr);
+ err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
+ sizeof(cp), &cp);
+
+ break;
+
+ default:
+ BT_DBG("unknown discovery state %u", hdev->discovery.state);
+ err = -EFAULT;
}
- bacpy(&cp.bdaddr, &e->data.bdaddr);
- err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
- &cp);
if (err < 0)
mgmt_pending_remove(cmd);
else
@@ -2501,6 +2528,37 @@ static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
return err;
}
+static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_set_device_id *cp = data;
+ int err;
+ __u16 source;
+
+ BT_DBG("%s", hdev->name);
+
+ source = __le16_to_cpu(cp->source);
+
+ if (source > 0x0002)
+ return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ hdev->devid_source = source;
+ hdev->devid_vendor = __le16_to_cpu(cp->vendor);
+ hdev->devid_product = __le16_to_cpu(cp->product);
+ hdev->devid_version = __le16_to_cpu(cp->version);
+
+ err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
+
+ update_eir(hdev);
+
+ hci_dev_unlock(hdev);
+
+ return err;
+}
+
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
@@ -2565,7 +2623,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
u16 key_count, expected_len;
int i;
- key_count = get_unaligned_le16(&cp->key_count);
+ key_count = __le16_to_cpu(cp->key_count);
expected_len = sizeof(*cp) + key_count *
sizeof(struct mgmt_ltk_info);
@@ -2591,7 +2649,8 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
else
type = HCI_SMP_LTK_SLAVE;
- hci_add_ltk(hdev, &key->addr.bdaddr, key->addr.type,
+ hci_add_ltk(hdev, &key->addr.bdaddr,
+ bdaddr_to_le(key->addr.type),
type, 0, key->authenticated, key->val,
key->enc_size, key->ediv, key->rand);
}
@@ -2601,7 +2660,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
return 0;
}
-struct mgmt_handler {
+static const struct mgmt_handler {
int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len);
bool var_len;
@@ -2647,6 +2706,7 @@ struct mgmt_handler {
{ confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
{ block_device, false, MGMT_BLOCK_DEVICE_SIZE },
{ unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
+ { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
};
@@ -2657,7 +2717,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
struct mgmt_hdr *hdr;
u16 opcode, index, len;
struct hci_dev *hdev = NULL;
- struct mgmt_handler *handler;
+ const struct mgmt_handler *handler;
int err;
BT_DBG("got %zu bytes", msglen);
@@ -2675,9 +2735,9 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
}
hdr = buf;
- opcode = get_unaligned_le16(&hdr->opcode);
- index = get_unaligned_le16(&hdr->index);
- len = get_unaligned_le16(&hdr->len);
+ opcode = __le16_to_cpu(hdr->opcode);
+ index = __le16_to_cpu(hdr->index);
+ len = __le16_to_cpu(hdr->len);
if (len != msglen - sizeof(*hdr)) {
err = -EINVAL;
@@ -2884,7 +2944,8 @@ int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
return 0;
}
-int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persistent)
+int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+ bool persistent)
{
struct mgmt_ev_new_link_key ev;
@@ -2892,7 +2953,7 @@ int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persisten
ev.store_hint = persistent;
bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
- ev.key.addr.type = MGMT_ADDR_BREDR;
+ ev.key.addr.type = BDADDR_BREDR;
ev.key.type = key->type;
memcpy(ev.key.val, key->val, 16);
ev.key.pin_len = key->pin_len;
@@ -2908,7 +2969,7 @@ int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
ev.store_hint = persistent;
bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
- ev.key.addr.type = key->bdaddr_type;
+ ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
ev.key.authenticated = key->authenticated;
ev.key.enc_size = key->enc_size;
ev.key.ediv = key->ediv;
@@ -2932,7 +2993,7 @@ int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u16 eir_len = 0;
bacpy(&ev->addr.bdaddr, bdaddr);
- ev->addr.type = link_to_mgmt(link_type, addr_type);
+ ev->addr.type = link_to_bdaddr(link_type, addr_type);
ev->flags = __cpu_to_le32(flags);
@@ -2944,7 +3005,7 @@ int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
eir_len = eir_append_data(ev->eir, eir_len,
EIR_CLASS_OF_DEV, dev_class, 3);
- put_unaligned_le16(eir_len, &ev->eir_len);
+ ev->eir_len = cpu_to_le16(eir_len);
return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
sizeof(*ev) + eir_len, NULL);
@@ -2995,13 +3056,13 @@ int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
bacpy(&ev.bdaddr, bdaddr);
- ev.type = link_to_mgmt(link_type, addr_type);
+ ev.type = link_to_bdaddr(link_type, addr_type);
err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
sk);
if (sk)
- sock_put(sk);
+ sock_put(sk);
mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
hdev);
@@ -3021,7 +3082,7 @@ int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
return -ENOENT;
bacpy(&rp.addr.bdaddr, bdaddr);
- rp.addr.type = link_to_mgmt(link_type, addr_type);
+ rp.addr.type = link_to_bdaddr(link_type, addr_type);
err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
mgmt_status(status), &rp, sizeof(rp));
@@ -3039,7 +3100,7 @@ int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
struct mgmt_ev_connect_failed ev;
bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = link_to_mgmt(link_type, addr_type);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
ev.status = mgmt_status(status);
return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
@@ -3050,7 +3111,7 @@ int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
struct mgmt_ev_pin_code_request ev;
bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = MGMT_ADDR_BREDR;
+ ev.addr.type = BDADDR_BREDR;
ev.secure = secure;
return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
@@ -3069,7 +3130,7 @@ int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
return -ENOENT;
bacpy(&rp.addr.bdaddr, bdaddr);
- rp.addr.type = MGMT_ADDR_BREDR;
+ rp.addr.type = BDADDR_BREDR;
err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
mgmt_status(status), &rp, sizeof(rp));
@@ -3091,7 +3152,7 @@ int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
return -ENOENT;
bacpy(&rp.addr.bdaddr, bdaddr);
- rp.addr.type = MGMT_ADDR_BREDR;
+ rp.addr.type = BDADDR_BREDR;
err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
mgmt_status(status), &rp, sizeof(rp));
@@ -3110,9 +3171,9 @@ int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
BT_DBG("%s", hdev->name);
bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = link_to_mgmt(link_type, addr_type);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
ev.confirm_hint = confirm_hint;
- put_unaligned_le32(value, &ev.value);
+ ev.value = value;
return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
NULL);
@@ -3126,7 +3187,7 @@ int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
BT_DBG("%s", hdev->name);
bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = link_to_mgmt(link_type, addr_type);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
NULL);
@@ -3145,7 +3206,7 @@ static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
return -ENOENT;
bacpy(&rp.addr.bdaddr, bdaddr);
- rp.addr.type = link_to_mgmt(link_type, addr_type);
+ rp.addr.type = link_to_bdaddr(link_type, addr_type);
err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
&rp, sizeof(rp));
@@ -3188,7 +3249,7 @@ int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
struct mgmt_ev_auth_failed ev;
bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = link_to_mgmt(link_type, addr_type);
+ ev.addr.type = link_to_bdaddr(link_type, addr_type);
ev.status = mgmt_status(status);
return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
@@ -3413,10 +3474,10 @@ int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
if (enable && test_and_clear_bit(HCI_LE_ENABLED,
&hdev->dev_flags))
- err = new_settings(hdev, NULL);
+ err = new_settings(hdev, NULL);
- mgmt_pending_foreach(MGMT_OP_SET_LE, hdev,
- cmd_status_rsp, &mgmt_err);
+ mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
+ &mgmt_err);
return err;
}
@@ -3455,7 +3516,7 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
memset(buf, 0, sizeof(buf));
bacpy(&ev->addr.bdaddr, bdaddr);
- ev->addr.type = link_to_mgmt(link_type, addr_type);
+ ev->addr.type = link_to_bdaddr(link_type, addr_type);
ev->rssi = rssi;
if (cfm_name)
ev->flags[0] |= MGMT_DEV_FOUND_CONFIRM_NAME;
@@ -3469,7 +3530,7 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
dev_class, 3);
- put_unaligned_le16(eir_len, &ev->eir_len);
+ ev->eir_len = cpu_to_le16(eir_len);
ev_size = sizeof(*ev) + eir_len;
@@ -3488,13 +3549,13 @@ int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
memset(buf, 0, sizeof(buf));
bacpy(&ev->addr.bdaddr, bdaddr);
- ev->addr.type = link_to_mgmt(link_type, addr_type);
+ ev->addr.type = link_to_bdaddr(link_type, addr_type);
ev->rssi = rssi;
eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
name_len);
- put_unaligned_le16(eir_len, &ev->eir_len);
+ ev->eir_len = cpu_to_le16(eir_len);
return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
sizeof(*ev) + eir_len, NULL);
@@ -3594,6 +3655,3 @@ int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed support");
-
-module_param(enable_le, bool, 0644);
-MODULE_PARM_DESC(enable_le, "Enable Low Energy support");
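The mgmt.c hunks also replace link_to_mgmt() with link_to_bdaddr(), which never yields an invalid address type: unknown LE address types fall back to BDADDR_LE_RANDOM, and anything that is not an LE link falls back to BDADDR_BREDR, with SCO/eSCO connections filtered separately in get_connections(). A standalone sketch of that mapping follows; the numeric constants are assumptions for illustration only, not values taken from the kernel headers.

#include <stdio.h>

/* Assumed values, for illustration only */
enum { ACL_LINK = 0x01, LE_LINK = 0x80 };
enum { ADDR_LE_DEV_PUBLIC = 0x00 };
enum { BDADDR_BREDR = 0x00, BDADDR_LE_PUBLIC = 0x01, BDADDR_LE_RANDOM = 0x02 };

/* Mirrors the fallback logic of the new link_to_bdaddr() shown above */
static unsigned char link_to_bdaddr(unsigned char link_type, unsigned char addr_type)
{
	switch (link_type) {
	case LE_LINK:
		if (addr_type == ADDR_LE_DEV_PUBLIC)
			return BDADDR_LE_PUBLIC;
		return BDADDR_LE_RANDOM;	/* fallback for unknown LE address types */
	default:
		return BDADDR_BREDR;		/* fallback, covers ACL_LINK as well */
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       link_to_bdaddr(ACL_LINK, 0),
	       link_to_bdaddr(LE_LINK, ADDR_LE_DEV_PUBLIC),
	       link_to_bdaddr(LE_LINK, 0x01));
	return 0;
}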
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index a55a43e9f70..e8707debb86 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -260,7 +260,8 @@ static void rfcomm_sock_init(struct sock *sk, struct sock *parent)
if (parent) {
sk->sk_type = parent->sk_type;
- pi->dlc->defer_setup = bt_sk(parent)->defer_setup;
+ pi->dlc->defer_setup = test_bit(BT_SK_DEFER_SETUP,
+ &bt_sk(parent)->flags);
pi->sec_level = rfcomm_pi(parent)->sec_level;
pi->role_switch = rfcomm_pi(parent)->role_switch;
@@ -731,7 +732,11 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
break;
}
- bt_sk(sk)->defer_setup = opt;
+ if (opt)
+ set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+ else
+ clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+
break;
default:
@@ -849,7 +854,8 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c
break;
}
- if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
+ if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
+ (u32 __user *) optval))
err = -EFAULT;
break;
@@ -972,7 +978,7 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *
done:
bh_unlock_sock(parent);
- if (bt_sk(parent)->defer_setup)
+ if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
parent->sk_state_change(parent);
return result;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index f6ab1290796..cbdd313659a 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -61,8 +61,6 @@ static struct bt_sock_list sco_sk_list = {
static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent);
static void sco_chan_del(struct sock *sk, int err);
-static int sco_conn_del(struct hci_conn *conn, int err);
-
static void sco_sock_close(struct sock *sk);
static void sco_sock_kill(struct sock *sk);
@@ -95,12 +93,12 @@ static void sco_sock_clear_timer(struct sock *sk)
}
/* ---- SCO connections ---- */
-static struct sco_conn *sco_conn_add(struct hci_conn *hcon, __u8 status)
+static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
{
struct hci_dev *hdev = hcon->hdev;
struct sco_conn *conn = hcon->sco_data;
- if (conn || status)
+ if (conn)
return conn;
conn = kzalloc(sizeof(struct sco_conn), GFP_ATOMIC);
@@ -195,13 +193,14 @@ static int sco_connect(struct sock *sk)
else
type = SCO_LINK;
- hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
+ hcon = hci_connect(hdev, type, dst, BDADDR_BREDR, BT_SECURITY_LOW,
+ HCI_AT_NO_BONDING);
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
goto done;
}
- conn = sco_conn_add(hcon, 0);
+ conn = sco_conn_add(hcon);
if (!conn) {
hci_conn_put(hcon);
err = -ENOMEM;
@@ -233,7 +232,7 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
{
struct sco_conn *conn = sco_pi(sk)->conn;
struct sk_buff *skb;
- int err, count;
+ int err;
/* Check outgoing MTU */
if (len > conn->mtu)
@@ -241,20 +240,18 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
BT_DBG("sk %p len %d", sk, len);
- count = min_t(unsigned int, conn->mtu, len);
- skb = bt_skb_send_alloc(sk, count,
- msg->msg_flags & MSG_DONTWAIT, &err);
+ skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
return err;
- if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
+ if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
kfree_skb(skb);
return -EFAULT;
}
hci_send_sco(conn->hcon, skb);
- return count;
+ return len;
}
static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
@@ -277,17 +274,20 @@ drop:
}
/* -------- Socket interface ---------- */
-static struct sock *__sco_get_sock_by_addr(bdaddr_t *ba)
+static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba)
{
- struct sock *sk;
struct hlist_node *node;
+ struct sock *sk;
+
+ sk_for_each(sk, node, &sco_sk_list.head) {
+ if (sk->sk_state != BT_LISTEN)
+ continue;
- sk_for_each(sk, node, &sco_sk_list.head)
if (!bacmp(&bt_sk(sk)->src, ba))
- goto found;
- sk = NULL;
-found:
- return sk;
+ return sk;
+ }
+
+ return NULL;
}
/* Find socket listening on source bdaddr.
@@ -466,7 +466,6 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
- bdaddr_t *src = &sa->sco_bdaddr;
int err = 0;
BT_DBG("sk %p %s", sk, batostr(&sa->sco_bdaddr));
@@ -481,17 +480,14 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
goto done;
}
- write_lock(&sco_sk_list.lock);
-
- if (bacmp(src, BDADDR_ANY) && __sco_get_sock_by_addr(src)) {
- err = -EADDRINUSE;
- } else {
- /* Save source address */
- bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
- sk->sk_state = BT_BOUND;
+ if (sk->sk_type != SOCK_SEQPACKET) {
+ err = -EINVAL;
+ goto done;
}
- write_unlock(&sco_sk_list.lock);
+ bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
+
+ sk->sk_state = BT_BOUND;
done:
release_sock(sk);
@@ -537,21 +533,38 @@ done:
static int sco_sock_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
+ bdaddr_t *src = &bt_sk(sk)->src;
int err = 0;
BT_DBG("sk %p backlog %d", sk, backlog);
lock_sock(sk);
- if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
+ if (sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}
+ if (sk->sk_type != SOCK_SEQPACKET) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ write_lock(&sco_sk_list.lock);
+
+ if (__sco_get_sock_listen_by_addr(src)) {
+ err = -EADDRINUSE;
+ goto unlock;
+ }
+
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
+
sk->sk_state = BT_LISTEN;
+unlock:
+ write_unlock(&sco_sk_list.lock);
+
done:
release_sock(sk);
return err;
@@ -923,7 +936,7 @@ int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
if (!status) {
struct sco_conn *conn;
- conn = sco_conn_add(hcon, status);
+ conn = sco_conn_add(hcon);
if (conn)
sco_conn_ready(conn);
} else
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index deb119875fd..6fc7c4708f3 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -956,7 +956,7 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
HCI_SMP_LTK_SLAVE, 1, authenticated,
enc.ltk, smp->enc_key_size, ediv, ident.rand);
- ident.ediv = cpu_to_le16(ediv);
+ ident.ediv = ediv;
smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident);