diff options
Diffstat (limited to 'net/bluetooth/hci_core.c')
| -rw-r--r-- | net/bluetooth/hci_core.c | 2337 | 
1 file changed, 2091 insertions(+), 246 deletions(-)
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index fb7356fcfe5..0a43cce9a91 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -27,11 +27,16 @@  #include <linux/export.h>  #include <linux/idr.h> -  #include <linux/rfkill.h> +#include <linux/debugfs.h> +#include <linux/crypto.h> +#include <asm/unaligned.h>  #include <net/bluetooth/bluetooth.h>  #include <net/bluetooth/hci_core.h> +#include <net/bluetooth/l2cap.h> + +#include "smp.h"  static void hci_rx_work(struct work_struct *work);  static void hci_cmd_work(struct work_struct *work); @@ -55,6 +60,1021 @@ static void hci_notify(struct hci_dev *hdev, int event)  	hci_sock_dev_event(hdev, event);  } +/* ---- HCI debugfs entries ---- */ + +static ssize_t dut_mode_read(struct file *file, char __user *user_buf, +			     size_t count, loff_t *ppos) +{ +	struct hci_dev *hdev = file->private_data; +	char buf[3]; + +	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N'; +	buf[1] = '\n'; +	buf[2] = '\0'; +	return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t dut_mode_write(struct file *file, const char __user *user_buf, +			      size_t count, loff_t *ppos) +{ +	struct hci_dev *hdev = file->private_data; +	struct sk_buff *skb; +	char buf[32]; +	size_t buf_size = min(count, (sizeof(buf)-1)); +	bool enable; +	int err; + +	if (!test_bit(HCI_UP, &hdev->flags)) +		return -ENETDOWN; + +	if (copy_from_user(buf, user_buf, buf_size)) +		return -EFAULT; + +	buf[buf_size] = '\0'; +	if (strtobool(buf, &enable)) +		return -EINVAL; + +	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags)) +		return -EALREADY; + +	hci_req_lock(hdev); +	if (enable) +		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL, +				     HCI_CMD_TIMEOUT); +	else +		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, +				     HCI_CMD_TIMEOUT); +	hci_req_unlock(hdev); + +	if (IS_ERR(skb)) +		return PTR_ERR(skb); + +	err = -bt_to_errno(skb->data[0]); +	kfree_skb(skb); + +	if (err < 
0) +		return err; + +	change_bit(HCI_DUT_MODE, &hdev->dev_flags); + +	return count; +} + +static const struct file_operations dut_mode_fops = { +	.open		= simple_open, +	.read		= dut_mode_read, +	.write		= dut_mode_write, +	.llseek		= default_llseek, +}; + +static int features_show(struct seq_file *f, void *ptr) +{ +	struct hci_dev *hdev = f->private; +	u8 p; + +	hci_dev_lock(hdev); +	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) { +		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x " +			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p, +			   hdev->features[p][0], hdev->features[p][1], +			   hdev->features[p][2], hdev->features[p][3], +			   hdev->features[p][4], hdev->features[p][5], +			   hdev->features[p][6], hdev->features[p][7]); +	} +	if (lmp_le_capable(hdev)) +		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x " +			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", +			   hdev->le_features[0], hdev->le_features[1], +			   hdev->le_features[2], hdev->le_features[3], +			   hdev->le_features[4], hdev->le_features[5], +			   hdev->le_features[6], hdev->le_features[7]); +	hci_dev_unlock(hdev); + +	return 0; +} + +static int features_open(struct inode *inode, struct file *file) +{ +	return single_open(file, features_show, inode->i_private); +} + +static const struct file_operations features_fops = { +	.open		= features_open, +	.read		= seq_read, +	.llseek		= seq_lseek, +	.release	= single_release, +}; + +static int blacklist_show(struct seq_file *f, void *p) +{ +	struct hci_dev *hdev = f->private; +	struct bdaddr_list *b; + +	hci_dev_lock(hdev); +	list_for_each_entry(b, &hdev->blacklist, list) +		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type); +	hci_dev_unlock(hdev); + +	return 0; +} + +static int blacklist_open(struct inode *inode, struct file *file) +{ +	return single_open(file, blacklist_show, inode->i_private); +} + +static const struct file_operations blacklist_fops = { +	.open		= blacklist_open, +	.read		= seq_read, +	.llseek		= 
seq_lseek, +	.release	= single_release, +}; + +static int uuids_show(struct seq_file *f, void *p) +{ +	struct hci_dev *hdev = f->private; +	struct bt_uuid *uuid; + +	hci_dev_lock(hdev); +	list_for_each_entry(uuid, &hdev->uuids, list) { +		u8 i, val[16]; + +		/* The Bluetooth UUID values are stored in big endian, +		 * but with reversed byte order. So convert them into +		 * the right order for the %pUb modifier. +		 */ +		for (i = 0; i < 16; i++) +			val[i] = uuid->uuid[15 - i]; + +		seq_printf(f, "%pUb\n", val); +	} +	hci_dev_unlock(hdev); + +	return 0; +} + +static int uuids_open(struct inode *inode, struct file *file) +{ +	return single_open(file, uuids_show, inode->i_private); +} + +static const struct file_operations uuids_fops = { +	.open		= uuids_open, +	.read		= seq_read, +	.llseek		= seq_lseek, +	.release	= single_release, +}; + +static int inquiry_cache_show(struct seq_file *f, void *p) +{ +	struct hci_dev *hdev = f->private; +	struct discovery_state *cache = &hdev->discovery; +	struct inquiry_entry *e; + +	hci_dev_lock(hdev); + +	list_for_each_entry(e, &cache->all, all) { +		struct inquiry_data *data = &e->data; +		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n", +			   &data->bdaddr, +			   data->pscan_rep_mode, data->pscan_period_mode, +			   data->pscan_mode, data->dev_class[2], +			   data->dev_class[1], data->dev_class[0], +			   __le16_to_cpu(data->clock_offset), +			   data->rssi, data->ssp_mode, e->timestamp); +	} + +	hci_dev_unlock(hdev); + +	return 0; +} + +static int inquiry_cache_open(struct inode *inode, struct file *file) +{ +	return single_open(file, inquiry_cache_show, inode->i_private); +} + +static const struct file_operations inquiry_cache_fops = { +	.open		= inquiry_cache_open, +	.read		= seq_read, +	.llseek		= seq_lseek, +	.release	= single_release, +}; + +static int link_keys_show(struct seq_file *f, void *ptr) +{ +	struct hci_dev *hdev = f->private; +	struct list_head *p, *n; + +	hci_dev_lock(hdev); +	
list_for_each_safe(p, n, &hdev->link_keys) { +		struct link_key *key = list_entry(p, struct link_key, list); +		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type, +			   HCI_LINK_KEY_SIZE, key->val, key->pin_len); +	} +	hci_dev_unlock(hdev); + +	return 0; +} + +static int link_keys_open(struct inode *inode, struct file *file) +{ +	return single_open(file, link_keys_show, inode->i_private); +} + +static const struct file_operations link_keys_fops = { +	.open		= link_keys_open, +	.read		= seq_read, +	.llseek		= seq_lseek, +	.release	= single_release, +}; + +static int dev_class_show(struct seq_file *f, void *ptr) +{ +	struct hci_dev *hdev = f->private; + +	hci_dev_lock(hdev); +	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2], +		   hdev->dev_class[1], hdev->dev_class[0]); +	hci_dev_unlock(hdev); + +	return 0; +} + +static int dev_class_open(struct inode *inode, struct file *file) +{ +	return single_open(file, dev_class_show, inode->i_private); +} + +static const struct file_operations dev_class_fops = { +	.open		= dev_class_open, +	.read		= seq_read, +	.llseek		= seq_lseek, +	.release	= single_release, +}; + +static int voice_setting_get(void *data, u64 *val) +{ +	struct hci_dev *hdev = data; + +	hci_dev_lock(hdev); +	*val = hdev->voice_setting; +	hci_dev_unlock(hdev); + +	return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get, +			NULL, "0x%4.4llx\n"); + +static int auto_accept_delay_set(void *data, u64 val) +{ +	struct hci_dev *hdev = data; + +	hci_dev_lock(hdev); +	hdev->auto_accept_delay = val; +	hci_dev_unlock(hdev); + +	return 0; +} + +static int auto_accept_delay_get(void *data, u64 *val) +{ +	struct hci_dev *hdev = data; + +	hci_dev_lock(hdev); +	*val = hdev->auto_accept_delay; +	hci_dev_unlock(hdev); + +	return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get, +			auto_accept_delay_set, "%llu\n"); + +static int ssp_debug_mode_set(void *data, u64 val) +{ +	struct hci_dev *hdev = data; +	
struct sk_buff *skb; +	__u8 mode; +	int err; + +	if (val != 0 && val != 1) +		return -EINVAL; + +	if (!test_bit(HCI_UP, &hdev->flags)) +		return -ENETDOWN; + +	hci_req_lock(hdev); +	mode = val; +	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode), +			     &mode, HCI_CMD_TIMEOUT); +	hci_req_unlock(hdev); + +	if (IS_ERR(skb)) +		return PTR_ERR(skb); + +	err = -bt_to_errno(skb->data[0]); +	kfree_skb(skb); + +	if (err < 0) +		return err; + +	hci_dev_lock(hdev); +	hdev->ssp_debug_mode = val; +	hci_dev_unlock(hdev); + +	return 0; +} + +static int ssp_debug_mode_get(void *data, u64 *val) +{ +	struct hci_dev *hdev = data; + +	hci_dev_lock(hdev); +	*val = hdev->ssp_debug_mode; +	hci_dev_unlock(hdev); + +	return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get, +			ssp_debug_mode_set, "%llu\n"); + +static ssize_t force_sc_support_read(struct file *file, char __user *user_buf, +				     size_t count, loff_t *ppos) +{ +	struct hci_dev *hdev = file->private_data; +	char buf[3]; + +	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 
'Y': 'N'; +	buf[1] = '\n'; +	buf[2] = '\0'; +	return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t force_sc_support_write(struct file *file, +				      const char __user *user_buf, +				      size_t count, loff_t *ppos) +{ +	struct hci_dev *hdev = file->private_data; +	char buf[32]; +	size_t buf_size = min(count, (sizeof(buf)-1)); +	bool enable; + +	if (test_bit(HCI_UP, &hdev->flags)) +		return -EBUSY; + +	if (copy_from_user(buf, user_buf, buf_size)) +		return -EFAULT; + +	buf[buf_size] = '\0'; +	if (strtobool(buf, &enable)) +		return -EINVAL; + +	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags)) +		return -EALREADY; + +	change_bit(HCI_FORCE_SC, &hdev->dev_flags); + +	return count; +} + +static const struct file_operations force_sc_support_fops = { +	.open		= simple_open, +	.read		= force_sc_support_read, +	.write		= force_sc_support_write, +	.llseek		= default_llseek, +}; + +static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf, +				 size_t count, loff_t *ppos) +{ +	struct hci_dev *hdev = file->private_data; +	char buf[3]; + +	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 
'Y': 'N'; +	buf[1] = '\n'; +	buf[2] = '\0'; +	return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static const struct file_operations sc_only_mode_fops = { +	.open		= simple_open, +	.read		= sc_only_mode_read, +	.llseek		= default_llseek, +}; + +static int idle_timeout_set(void *data, u64 val) +{ +	struct hci_dev *hdev = data; + +	if (val != 0 && (val < 500 || val > 3600000)) +		return -EINVAL; + +	hci_dev_lock(hdev); +	hdev->idle_timeout = val; +	hci_dev_unlock(hdev); + +	return 0; +} + +static int idle_timeout_get(void *data, u64 *val) +{ +	struct hci_dev *hdev = data; + +	hci_dev_lock(hdev); +	*val = hdev->idle_timeout; +	hci_dev_unlock(hdev); + +	return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get, +			idle_timeout_set, "%llu\n"); + +static int rpa_timeout_set(void *data, u64 val) +{ +	struct hci_dev *hdev = data; + +	/* Require the RPA timeout to be at least 30 seconds and at most +	 * 24 hours. +	 */ +	if (val < 30 || val > (60 * 60 * 24)) +		return -EINVAL; + +	hci_dev_lock(hdev); +	hdev->rpa_timeout = val; +	hci_dev_unlock(hdev); + +	return 0; +} + +static int rpa_timeout_get(void *data, u64 *val) +{ +	struct hci_dev *hdev = data; + +	hci_dev_lock(hdev); +	*val = hdev->rpa_timeout; +	hci_dev_unlock(hdev); + +	return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get, +			rpa_timeout_set, "%llu\n"); + +static int sniff_min_interval_set(void *data, u64 val) +{ +	struct hci_dev *hdev = data; + +	if (val == 0 || val % 2 || val > hdev->sniff_max_interval) +		return -EINVAL; + +	hci_dev_lock(hdev); +	hdev->sniff_min_interval = val; +	hci_dev_unlock(hdev); + +	return 0; +} + +static int sniff_min_interval_get(void *data, u64 *val) +{ +	struct hci_dev *hdev = data; + +	hci_dev_lock(hdev); +	*val = hdev->sniff_min_interval; +	hci_dev_unlock(hdev); + +	return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get, +			sniff_min_interval_set, "%llu\n"); + +static int 
sniff_max_interval_set(void *data, u64 val) +{ +	struct hci_dev *hdev = data; + +	if (val == 0 || val % 2 || val < hdev->sniff_min_interval) +		return -EINVAL; + +	hci_dev_lock(hdev); +	hdev->sniff_max_interval = val; +	hci_dev_unlock(hdev); + +	return 0; +} + +static int sniff_max_interval_get(void *data, u64 *val) +{ +	struct hci_dev *hdev = data; + +	hci_dev_lock(hdev); +	*val = hdev->sniff_max_interval; +	hci_dev_unlock(hdev); + +	return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get, +			sniff_max_interval_set, "%llu\n"); + +static int conn_info_min_age_set(void *data, u64 val) +{ +	struct hci_dev *hdev = data; + +	if (val == 0 || val > hdev->conn_info_max_age) +		return -EINVAL; + +	hci_dev_lock(hdev); +	hdev->conn_info_min_age = val; +	hci_dev_unlock(hdev); + +	return 0; +} + +static int conn_info_min_age_get(void *data, u64 *val) +{ +	struct hci_dev *hdev = data; + +	hci_dev_lock(hdev); +	*val = hdev->conn_info_min_age; +	hci_dev_unlock(hdev); + +	return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get, +			conn_info_min_age_set, "%llu\n"); + +static int conn_info_max_age_set(void *data, u64 val) +{ +	struct hci_dev *hdev = data; + +	if (val == 0 || val < hdev->conn_info_min_age) +		return -EINVAL; + +	hci_dev_lock(hdev); +	hdev->conn_info_max_age = val; +	hci_dev_unlock(hdev); + +	return 0; +} + +static int conn_info_max_age_get(void *data, u64 *val) +{ +	struct hci_dev *hdev = data; + +	hci_dev_lock(hdev); +	*val = hdev->conn_info_max_age; +	hci_dev_unlock(hdev); + +	return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get, +			conn_info_max_age_set, "%llu\n"); + +static int identity_show(struct seq_file *f, void *p) +{ +	struct hci_dev *hdev = f->private; +	bdaddr_t addr; +	u8 addr_type; + +	hci_dev_lock(hdev); + +	hci_copy_identity_address(hdev, &addr, &addr_type); + +	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type, +		   16, hdev->irk, 
&hdev->rpa); + +	hci_dev_unlock(hdev); + +	return 0; +} + +static int identity_open(struct inode *inode, struct file *file) +{ +	return single_open(file, identity_show, inode->i_private); +} + +static const struct file_operations identity_fops = { +	.open		= identity_open, +	.read		= seq_read, +	.llseek		= seq_lseek, +	.release	= single_release, +}; + +static int random_address_show(struct seq_file *f, void *p) +{ +	struct hci_dev *hdev = f->private; + +	hci_dev_lock(hdev); +	seq_printf(f, "%pMR\n", &hdev->random_addr); +	hci_dev_unlock(hdev); + +	return 0; +} + +static int random_address_open(struct inode *inode, struct file *file) +{ +	return single_open(file, random_address_show, inode->i_private); +} + +static const struct file_operations random_address_fops = { +	.open		= random_address_open, +	.read		= seq_read, +	.llseek		= seq_lseek, +	.release	= single_release, +}; + +static int static_address_show(struct seq_file *f, void *p) +{ +	struct hci_dev *hdev = f->private; + +	hci_dev_lock(hdev); +	seq_printf(f, "%pMR\n", &hdev->static_addr); +	hci_dev_unlock(hdev); + +	return 0; +} + +static int static_address_open(struct inode *inode, struct file *file) +{ +	return single_open(file, static_address_show, inode->i_private); +} + +static const struct file_operations static_address_fops = { +	.open		= static_address_open, +	.read		= seq_read, +	.llseek		= seq_lseek, +	.release	= single_release, +}; + +static ssize_t force_static_address_read(struct file *file, +					 char __user *user_buf, +					 size_t count, loff_t *ppos) +{ +	struct hci_dev *hdev = file->private_data; +	char buf[3]; + +	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 
'Y': 'N'; +	buf[1] = '\n'; +	buf[2] = '\0'; +	return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t force_static_address_write(struct file *file, +					  const char __user *user_buf, +					  size_t count, loff_t *ppos) +{ +	struct hci_dev *hdev = file->private_data; +	char buf[32]; +	size_t buf_size = min(count, (sizeof(buf)-1)); +	bool enable; + +	if (test_bit(HCI_UP, &hdev->flags)) +		return -EBUSY; + +	if (copy_from_user(buf, user_buf, buf_size)) +		return -EFAULT; + +	buf[buf_size] = '\0'; +	if (strtobool(buf, &enable)) +		return -EINVAL; + +	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags)) +		return -EALREADY; + +	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags); + +	return count; +} + +static const struct file_operations force_static_address_fops = { +	.open		= simple_open, +	.read		= force_static_address_read, +	.write		= force_static_address_write, +	.llseek		= default_llseek, +}; + +static int white_list_show(struct seq_file *f, void *ptr) +{ +	struct hci_dev *hdev = f->private; +	struct bdaddr_list *b; + +	hci_dev_lock(hdev); +	list_for_each_entry(b, &hdev->le_white_list, list) +		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type); +	hci_dev_unlock(hdev); + +	return 0; +} + +static int white_list_open(struct inode *inode, struct file *file) +{ +	return single_open(file, white_list_show, inode->i_private); +} + +static const struct file_operations white_list_fops = { +	.open		= white_list_open, +	.read		= seq_read, +	.llseek		= seq_lseek, +	.release	= single_release, +}; + +static int identity_resolving_keys_show(struct seq_file *f, void *ptr) +{ +	struct hci_dev *hdev = f->private; +	struct list_head *p, *n; + +	hci_dev_lock(hdev); +	list_for_each_safe(p, n, &hdev->identity_resolving_keys) { +		struct smp_irk *irk = list_entry(p, struct smp_irk, list); +		seq_printf(f, "%pMR (type %u) %*phN %pMR\n", +			   &irk->bdaddr, irk->addr_type, +			   16, irk->val, &irk->rpa); +	} +	hci_dev_unlock(hdev); 
+ +	return 0; +} + +static int identity_resolving_keys_open(struct inode *inode, struct file *file) +{ +	return single_open(file, identity_resolving_keys_show, +			   inode->i_private); +} + +static const struct file_operations identity_resolving_keys_fops = { +	.open		= identity_resolving_keys_open, +	.read		= seq_read, +	.llseek		= seq_lseek, +	.release	= single_release, +}; + +static int long_term_keys_show(struct seq_file *f, void *ptr) +{ +	struct hci_dev *hdev = f->private; +	struct list_head *p, *n; + +	hci_dev_lock(hdev); +	list_for_each_safe(p, n, &hdev->long_term_keys) { +		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list); +		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n", +			   <k->bdaddr, ltk->bdaddr_type, ltk->authenticated, +			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv), +			   __le64_to_cpu(ltk->rand), 16, ltk->val); +	} +	hci_dev_unlock(hdev); + +	return 0; +} + +static int long_term_keys_open(struct inode *inode, struct file *file) +{ +	return single_open(file, long_term_keys_show, inode->i_private); +} + +static const struct file_operations long_term_keys_fops = { +	.open		= long_term_keys_open, +	.read		= seq_read, +	.llseek		= seq_lseek, +	.release	= single_release, +}; + +static int conn_min_interval_set(void *data, u64 val) +{ +	struct hci_dev *hdev = data; + +	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval) +		return -EINVAL; + +	hci_dev_lock(hdev); +	hdev->le_conn_min_interval = val; +	hci_dev_unlock(hdev); + +	return 0; +} + +static int conn_min_interval_get(void *data, u64 *val) +{ +	struct hci_dev *hdev = data; + +	hci_dev_lock(hdev); +	*val = hdev->le_conn_min_interval; +	hci_dev_unlock(hdev); + +	return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get, +			conn_min_interval_set, "%llu\n"); + +static int conn_max_interval_set(void *data, u64 val) +{ +	struct hci_dev *hdev = data; + +	if (val < 0x0006 || val > 0x0c80 || val < 
hdev->le_conn_min_interval) +		return -EINVAL; + +	hci_dev_lock(hdev); +	hdev->le_conn_max_interval = val; +	hci_dev_unlock(hdev); + +	return 0; +} + +static int conn_max_interval_get(void *data, u64 *val) +{ +	struct hci_dev *hdev = data; + +	hci_dev_lock(hdev); +	*val = hdev->le_conn_max_interval; +	hci_dev_unlock(hdev); + +	return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get, +			conn_max_interval_set, "%llu\n"); + +static int adv_channel_map_set(void *data, u64 val) +{ +	struct hci_dev *hdev = data; + +	if (val < 0x01 || val > 0x07) +		return -EINVAL; + +	hci_dev_lock(hdev); +	hdev->le_adv_channel_map = val; +	hci_dev_unlock(hdev); + +	return 0; +} + +static int adv_channel_map_get(void *data, u64 *val) +{ +	struct hci_dev *hdev = data; + +	hci_dev_lock(hdev); +	*val = hdev->le_adv_channel_map; +	hci_dev_unlock(hdev); + +	return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get, +			adv_channel_map_set, "%llu\n"); + +static ssize_t lowpan_read(struct file *file, char __user *user_buf, +			   size_t count, loff_t *ppos) +{ +	struct hci_dev *hdev = file->private_data; +	char buf[3]; + +	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 
'Y' : 'N'; +	buf[1] = '\n'; +	buf[2] = '\0'; +	return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer, +			    size_t count, loff_t *position) +{ +	struct hci_dev *hdev = fp->private_data; +	bool enable; +	char buf[32]; +	size_t buf_size = min(count, (sizeof(buf)-1)); + +	if (copy_from_user(buf, user_buffer, buf_size)) +		return -EFAULT; + +	buf[buf_size] = '\0'; + +	if (strtobool(buf, &enable) < 0) +		return -EINVAL; + +	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags)) +		return -EALREADY; + +	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags); + +	return count; +} + +static const struct file_operations lowpan_debugfs_fops = { +	.open		= simple_open, +	.read		= lowpan_read, +	.write		= lowpan_write, +	.llseek		= default_llseek, +}; + +static int le_auto_conn_show(struct seq_file *sf, void *ptr) +{ +	struct hci_dev *hdev = sf->private; +	struct hci_conn_params *p; + +	hci_dev_lock(hdev); + +	list_for_each_entry(p, &hdev->le_conn_params, list) { +		seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type, +			   p->auto_connect); +	} + +	hci_dev_unlock(hdev); + +	return 0; +} + +static int le_auto_conn_open(struct inode *inode, struct file *file) +{ +	return single_open(file, le_auto_conn_show, inode->i_private); +} + +static ssize_t le_auto_conn_write(struct file *file, const char __user *data, +				  size_t count, loff_t *offset) +{ +	struct seq_file *sf = file->private_data; +	struct hci_dev *hdev = sf->private; +	u8 auto_connect = 0; +	bdaddr_t addr; +	u8 addr_type; +	char *buf; +	int err = 0; +	int n; + +	/* Don't allow partial write */ +	if (*offset != 0) +		return -EINVAL; + +	if (count < 3) +		return -EINVAL; + +	buf = memdup_user(data, count); +	if (IS_ERR(buf)) +		return PTR_ERR(buf); + +	if (memcmp(buf, "add", 3) == 0) { +		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu", +			   &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2], +			   
&addr.b[1], &addr.b[0], &addr_type, +			   &auto_connect); + +		if (n < 7) { +			err = -EINVAL; +			goto done; +		} + +		hci_dev_lock(hdev); +		err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect, +					  hdev->le_conn_min_interval, +					  hdev->le_conn_max_interval); +		hci_dev_unlock(hdev); + +		if (err) +			goto done; +	} else if (memcmp(buf, "del", 3) == 0) { +		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu", +			   &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2], +			   &addr.b[1], &addr.b[0], &addr_type); + +		if (n < 7) { +			err = -EINVAL; +			goto done; +		} + +		hci_dev_lock(hdev); +		hci_conn_params_del(hdev, &addr, addr_type); +		hci_dev_unlock(hdev); +	} else if (memcmp(buf, "clr", 3) == 0) { +		hci_dev_lock(hdev); +		hci_conn_params_clear(hdev); +		hci_pend_le_conns_clear(hdev); +		hci_update_background_scan(hdev); +		hci_dev_unlock(hdev); +	} else { +		err = -EINVAL; +	} + +done: +	kfree(buf); + +	if (err) +		return err; +	else +		return count; +} + +static const struct file_operations le_auto_conn_fops = { +	.open		= le_auto_conn_open, +	.read		= seq_read, +	.write		= le_auto_conn_write, +	.llseek		= seq_lseek, +	.release	= single_release, +}; +  /* ---- HCI requests ---- */  static void hci_req_sync_complete(struct hci_dev *hdev, u8 result) @@ -307,11 +1327,23 @@ static void amp_init(struct hci_request *req)  	/* Read Local Version */  	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL); +	/* Read Local Supported Commands */ +	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); + +	/* Read Local Supported Features */ +	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL); +  	/* Read Local AMP Info */  	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);  	/* Read Data Blk size */  	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL); + +	/* Read Flow Control Mode */ +	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL); + +	/* Read Location Data */ +	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);  }  
static void hci_init1_req(struct hci_request *req, unsigned long opt) @@ -341,6 +1373,8 @@ static void hci_init1_req(struct hci_request *req, unsigned long opt)  static void bredr_setup(struct hci_request *req)  { +	struct hci_dev *hdev = req->hdev; +  	__le16 param;  	__u8 flt_type; @@ -356,16 +1390,24 @@ static void bredr_setup(struct hci_request *req)  	/* Read Voice Setting */  	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL); +	/* Read Number of Supported IAC */ +	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL); + +	/* Read Current IAC LAP */ +	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL); +  	/* Clear Event Filters */  	flt_type = HCI_FLT_CLEAR_ALL;  	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);  	/* Connection accept timeout ~20 secs */ -	param = __constant_cpu_to_le16(0x7d00); +	param = cpu_to_le16(0x7d00);  	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m); -	/* Read page scan parameters */ -	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) { +	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2, +	 * but it does not support page scan related HCI commands. 
+	 */ +	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {  		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);  		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);  	} @@ -381,14 +1423,17 @@ static void le_setup(struct hci_request *req)  	/* Read LE Local Supported Features */  	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL); +	/* Read LE Supported States */ +	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL); +  	/* Read LE Advertising Channel TX Power */  	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);  	/* Read LE White List Size */  	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL); -	/* Read LE Supported States */ -	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL); +	/* Clear LE White List */ +	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);  	/* LE-only controllers have LE implicitly enabled */  	if (!lmp_bredr_capable(hdev)) @@ -519,6 +1564,8 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)  	if (lmp_bredr_capable(hdev))  		bredr_setup(req); +	else +		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);  	if (lmp_le_capable(hdev))  		le_setup(req); @@ -532,6 +1579,14 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)  		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);  	if (lmp_ssp_capable(hdev)) { +		/* When SSP is available, then the host features page +		 * should also be available as well. However some +		 * controllers list the max_page as 0 as long as SSP +		 * has not been enabled. To achieve proper debugging +		 * output, force the minimum max_page to 1 at least. 
+		 */ +		hdev->max_page = 0x01; +  		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {  			u8 mode = 0x01;  			hci_req_add(req, HCI_OP_WRITE_SSP_MODE, @@ -607,6 +1662,38 @@ static void hci_set_le_support(struct hci_request *req)  			    &cp);  } +static void hci_set_event_mask_page_2(struct hci_request *req) +{ +	struct hci_dev *hdev = req->hdev; +	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; + +	/* If Connectionless Slave Broadcast master role is supported +	 * enable all necessary events for it. +	 */ +	if (lmp_csb_master_capable(hdev)) { +		events[1] |= 0x40;	/* Triggered Clock Capture */ +		events[1] |= 0x80;	/* Synchronization Train Complete */ +		events[2] |= 0x10;	/* Slave Page Response Timeout */ +		events[2] |= 0x20;	/* CSB Channel Map Change */ +	} + +	/* If Connectionless Slave Broadcast slave role is supported +	 * enable all necessary events for it. +	 */ +	if (lmp_csb_slave_capable(hdev)) { +		events[2] |= 0x01;	/* Synchronization Train Received */ +		events[2] |= 0x02;	/* CSB Receive */ +		events[2] |= 0x04;	/* CSB Timeout */ +		events[2] |= 0x08;	/* Truncated Page Complete */ +	} + +	/* Enable Authenticated Payload Timeout Expired event if supported */ +	if (lmp_ping_capable(hdev)) +		events[2] |= 0x80; + +	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events); +} +  static void hci_init3_req(struct hci_request *req, unsigned long opt)  {  	struct hci_dev *hdev = req->hdev; @@ -620,8 +1707,13 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)  	 * as supported send it. If not supported assume that the controller  	 * does not have actual support for stored link keys which makes this  	 * command redundant anyway. +	 * +	 * Some controllers indicate that they support handling deleting +	 * stored link keys, but they don't. The quirk lets a driver +	 * just disable this command.  	 
*/ -	if (hdev->commands[6] & 0x80) { +	if (hdev->commands[6] & 0x80 && +	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {  		struct hci_cp_delete_stored_link_key cp;  		bacpy(&cp.bdaddr, BDADDR_ANY); @@ -633,10 +1725,8 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)  	if (hdev->commands[5] & 0x10)  		hci_setup_link_policy(req); -	if (lmp_le_capable(hdev)) { +	if (lmp_le_capable(hdev))  		hci_set_le_support(req); -		hci_update_ad(req); -	}  	/* Read features beyond page 1 if available */  	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) { @@ -648,6 +1738,28 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)  	}  } +static void hci_init4_req(struct hci_request *req, unsigned long opt) +{ +	struct hci_dev *hdev = req->hdev; + +	/* Set event mask page 2 if the HCI command for it is supported */ +	if (hdev->commands[22] & 0x04) +		hci_set_event_mask_page_2(req); + +	/* Check for Synchronization Train support */ +	if (lmp_sync_train_capable(hdev)) +		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL); + +	/* Enable Secure Connections if supported and configured */ +	if ((lmp_sc_capable(hdev) || +	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) && +	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) { +		u8 support = 0x01; +		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT, +			    sizeof(support), &support); +	} +} +  static int __hci_init(struct hci_dev *hdev)  {  	int err; @@ -656,6 +1768,14 @@ static int __hci_init(struct hci_dev *hdev)  	if (err < 0)  		return err; +	/* The Device Under Test (DUT) mode is special and available for +	 * all controller types. So just create it early on. +	 */ +	if (test_bit(HCI_SETUP, &hdev->dev_flags)) { +		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev, +				    &dut_mode_fops); +	} +  	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode  	 * BR/EDR/LE type controllers. AMP controllers only need the  	 * first stage init. 
@@ -667,7 +1787,110 @@ static int __hci_init(struct hci_dev *hdev)  	if (err < 0)  		return err; -	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT); +	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT); +	if (err < 0) +		return err; + +	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT); +	if (err < 0) +		return err; + +	/* Only create debugfs entries during the initial setup +	 * phase and not every time the controller gets powered on. +	 */ +	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) +		return 0; + +	debugfs_create_file("features", 0444, hdev->debugfs, hdev, +			    &features_fops); +	debugfs_create_u16("manufacturer", 0444, hdev->debugfs, +			   &hdev->manufacturer); +	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver); +	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev); +	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev, +			    &blacklist_fops); +	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops); + +	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev, +			    &conn_info_min_age_fops); +	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev, +			    &conn_info_max_age_fops); + +	if (lmp_bredr_capable(hdev)) { +		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs, +				    hdev, &inquiry_cache_fops); +		debugfs_create_file("link_keys", 0400, hdev->debugfs, +				    hdev, &link_keys_fops); +		debugfs_create_file("dev_class", 0444, hdev->debugfs, +				    hdev, &dev_class_fops); +		debugfs_create_file("voice_setting", 0444, hdev->debugfs, +				    hdev, &voice_setting_fops); +	} + +	if (lmp_ssp_capable(hdev)) { +		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs, +				    hdev, &auto_accept_delay_fops); +		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs, +				    hdev, &ssp_debug_mode_fops); +		debugfs_create_file("force_sc_support", 0644, hdev->debugfs, +				    hdev, &force_sc_support_fops); +		
debugfs_create_file("sc_only_mode", 0444, hdev->debugfs, +				    hdev, &sc_only_mode_fops); +	} + +	if (lmp_sniff_capable(hdev)) { +		debugfs_create_file("idle_timeout", 0644, hdev->debugfs, +				    hdev, &idle_timeout_fops); +		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs, +				    hdev, &sniff_min_interval_fops); +		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs, +				    hdev, &sniff_max_interval_fops); +	} + +	if (lmp_le_capable(hdev)) { +		debugfs_create_file("identity", 0400, hdev->debugfs, +				    hdev, &identity_fops); +		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs, +				    hdev, &rpa_timeout_fops); +		debugfs_create_file("random_address", 0444, hdev->debugfs, +				    hdev, &random_address_fops); +		debugfs_create_file("static_address", 0444, hdev->debugfs, +				    hdev, &static_address_fops); + +		/* For controllers with a public address, provide a debug +		 * option to force the usage of the configured static +		 * address. By default the public address is used. 
+		 */ +		if (bacmp(&hdev->bdaddr, BDADDR_ANY)) +			debugfs_create_file("force_static_address", 0644, +					    hdev->debugfs, hdev, +					    &force_static_address_fops); + +		debugfs_create_u8("white_list_size", 0444, hdev->debugfs, +				  &hdev->le_white_list_size); +		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev, +				    &white_list_fops); +		debugfs_create_file("identity_resolving_keys", 0400, +				    hdev->debugfs, hdev, +				    &identity_resolving_keys_fops); +		debugfs_create_file("long_term_keys", 0400, hdev->debugfs, +				    hdev, &long_term_keys_fops); +		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs, +				    hdev, &conn_min_interval_fops); +		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs, +				    hdev, &conn_max_interval_fops); +		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs, +				    hdev, &adv_channel_map_fops); +		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev, +				    &lowpan_debugfs_fops); +		debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev, +				    &le_auto_conn_fops); +		debugfs_create_u16("discov_interleaved_timeout", 0644, +				   hdev->debugfs, +				   &hdev->discov_interleaved_timeout); +	} + +	return 0;  }  static void hci_scan_req(struct hci_request *req, unsigned long opt) @@ -757,6 +1980,8 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state)  	switch (state) {  	case DISCOVERY_STOPPED: +		hci_update_background_scan(hdev); +  		if (hdev->discovery.state != DISCOVERY_STARTING)  			mgmt_discovering(hdev, 0);  		break; @@ -868,12 +2093,11 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,  	hci_remove_remote_oob_data(hdev, &data->bdaddr); -	if (ssp) -		*ssp = data->ssp_mode; +	*ssp = data->ssp_mode;  	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);  	if (ie) { -		if (ie->data.ssp_mode && ssp) +		if (ie->data.ssp_mode)  			*ssp = true;  		if (ie->name_state == NAME_NEEDED && @@ -984,6 +2208,21 @@ int 
hci_inquiry(void __user *arg)  	if (!hdev)  		return -ENODEV; +	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { +		err = -EBUSY; +		goto done; +	} + +	if (hdev->dev_type != HCI_BREDR) { +		err = -EOPNOTSUPP; +		goto done; +	} + +	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) { +		err = -EOPNOTSUPP; +		goto done; +	} +  	hci_dev_lock(hdev);  	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||  	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) { @@ -1043,100 +2282,10 @@ done:  	return err;  } -static u8 create_ad(struct hci_dev *hdev, u8 *ptr) +static int hci_dev_do_open(struct hci_dev *hdev)  { -	u8 ad_len = 0, flags = 0; -	size_t name_len; - -	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) -		flags |= LE_AD_GENERAL; - -	if (!lmp_bredr_capable(hdev)) -		flags |= LE_AD_NO_BREDR; - -	if (lmp_le_br_capable(hdev)) -		flags |= LE_AD_SIM_LE_BREDR_CTRL; - -	if (lmp_host_le_br_capable(hdev)) -		flags |= LE_AD_SIM_LE_BREDR_HOST; - -	if (flags) { -		BT_DBG("adv flags 0x%02x", flags); - -		ptr[0] = 2; -		ptr[1] = EIR_FLAGS; -		ptr[2] = flags; - -		ad_len += 3; -		ptr += 3; -	} - -	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) { -		ptr[0] = 2; -		ptr[1] = EIR_TX_POWER; -		ptr[2] = (u8) hdev->adv_tx_power; - -		ad_len += 3; -		ptr += 3; -	} - -	name_len = strlen(hdev->dev_name); -	if (name_len > 0) { -		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2; - -		if (name_len > max_len) { -			name_len = max_len; -			ptr[1] = EIR_NAME_SHORT; -		} else -			ptr[1] = EIR_NAME_COMPLETE; - -		ptr[0] = name_len + 1; - -		memcpy(ptr + 2, hdev->dev_name, name_len); - -		ad_len += (name_len + 2); -		ptr += (name_len + 2); -	} - -	return ad_len; -} - -void hci_update_ad(struct hci_request *req) -{ -	struct hci_dev *hdev = req->hdev; -	struct hci_cp_le_set_adv_data cp; -	u8 len; - -	if (!lmp_le_capable(hdev)) -		return; - -	memset(&cp, 0, sizeof(cp)); - -	len = create_ad(hdev, cp.data); - -	if (hdev->adv_data_len == len && -	    memcmp(cp.data, hdev->adv_data, len) 
== 0) -		return; - -	memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); -	hdev->adv_data_len = len; - -	cp.length = len; - -	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp); -} - -/* ---- HCI ioctl helpers ---- */ - -int hci_dev_open(__u16 dev) -{ -	struct hci_dev *hdev;  	int ret = 0; -	hdev = hci_dev_get(dev); -	if (!hdev) -		return -ENODEV; -  	BT_DBG("%s %p", hdev->name, hdev);  	hci_req_lock(hdev); @@ -1146,13 +2295,34 @@ int hci_dev_open(__u16 dev)  		goto done;  	} -	/* Check for rfkill but allow the HCI setup stage to proceed -	 * (which in itself doesn't cause any RF activity). -	 */ -	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) && -	    !test_bit(HCI_SETUP, &hdev->dev_flags)) { -		ret = -ERFKILL; -		goto done; +	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) { +		/* Check for rfkill but allow the HCI setup stage to +		 * proceed (which in itself doesn't cause any RF activity). +		 */ +		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) { +			ret = -ERFKILL; +			goto done; +		} + +		/* Check for valid public address or a configured static +		 * random adddress, but let the HCI setup proceed to +		 * be able to determine if there is a public address +		 * or not. +		 * +		 * In case of user channel usage, it is not important +		 * if a public address or static random address is +		 * available. +		 * +		 * This check is only valid for BR/EDR controllers +		 * since AMP controllers do not have an address. +		 */ +		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) && +		    hdev->dev_type == HCI_BREDR && +		    !bacmp(&hdev->bdaddr, BDADDR_ANY) && +		    !bacmp(&hdev->static_addr, BDADDR_ANY)) { +			ret = -EADDRNOTAVAIL; +			goto done; +		}  	}  	if (test_bit(HCI_UP, &hdev->flags)) { @@ -1172,16 +2342,11 @@ int hci_dev_open(__u16 dev)  		ret = hdev->setup(hdev);  	if (!ret) { -		/* Treat all non BR/EDR controllers as raw devices if -		 * enable_hs is not set. 
-		 */ -		if (hdev->dev_type != HCI_BREDR && !enable_hs) -			set_bit(HCI_RAW, &hdev->flags); -  		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))  			set_bit(HCI_RAW, &hdev->flags); -		if (!test_bit(HCI_RAW, &hdev->flags)) +		if (!test_bit(HCI_RAW, &hdev->flags) && +		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))  			ret = __hci_init(hdev);  	} @@ -1189,10 +2354,12 @@ int hci_dev_open(__u16 dev)  	if (!ret) {  		hci_dev_hold(hdev); +		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);  		set_bit(HCI_UP, &hdev->flags);  		hci_notify(hdev, HCI_DEV_UP);  		if (!test_bit(HCI_SETUP, &hdev->dev_flags) && -		    mgmt_valid_hdev(hdev)) { +		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) && +		    hdev->dev_type == HCI_BREDR) {  			hci_dev_lock(hdev);  			mgmt_powered(hdev, 1);  			hci_dev_unlock(hdev); @@ -1220,10 +2387,41 @@ int hci_dev_open(__u16 dev)  done:  	hci_req_unlock(hdev); -	hci_dev_put(hdev);  	return ret;  } +/* ---- HCI ioctl helpers ---- */ + +int hci_dev_open(__u16 dev) +{ +	struct hci_dev *hdev; +	int err; + +	hdev = hci_dev_get(dev); +	if (!hdev) +		return -ENODEV; + +	/* We need to ensure that no other power on/off work is pending +	 * before proceeding to call hci_dev_do_open. This is +	 * particularly important if the setup procedure has not yet +	 * completed. +	 */ +	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) +		cancel_delayed_work(&hdev->power_off); + +	/* After this call it is guaranteed that the setup procedure +	 * has finished. This means that error conditions like RFKILL +	 * or no valid public or static random address apply. 
+	 */ +	flush_workqueue(hdev->req_workqueue); + +	err = hci_dev_do_open(hdev); + +	hci_dev_put(hdev); + +	return err; +} +  static int hci_dev_do_close(struct hci_dev *hdev)  {  	BT_DBG("%s %p", hdev->name, hdev); @@ -1247,6 +2445,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)  		cancel_delayed_work(&hdev->discov_off);  		hdev->discov_timeout = 0;  		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags); +		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);  	}  	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) @@ -1254,9 +2453,13 @@ static int hci_dev_do_close(struct hci_dev *hdev)  	cancel_delayed_work_sync(&hdev->le_scan_disable); +	if (test_bit(HCI_MGMT, &hdev->dev_flags)) +		cancel_delayed_work_sync(&hdev->rpa_expired); +  	hci_dev_lock(hdev);  	hci_inquiry_cache_flush(hdev);  	hci_conn_hash_flush(hdev); +	hci_pend_le_conns_clear(hdev);  	hci_dev_unlock(hdev);  	hci_notify(hdev, HCI_DEV_DOWN); @@ -1268,6 +2471,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)  	skb_queue_purge(&hdev->cmd_q);  	atomic_set(&hdev->cmd_cnt, 1);  	if (!test_bit(HCI_RAW, &hdev->flags) && +	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&  	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {  		set_bit(HCI_INIT, &hdev->flags);  		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT); @@ -1300,18 +2504,20 @@ static int hci_dev_do_close(struct hci_dev *hdev)  	hdev->flags = 0;  	hdev->dev_flags &= ~HCI_PERSISTENT_MASK; -	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) && -	    mgmt_valid_hdev(hdev)) { -		hci_dev_lock(hdev); -		mgmt_powered(hdev, 0); -		hci_dev_unlock(hdev); +	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) { +		if (hdev->dev_type == HCI_BREDR) { +			hci_dev_lock(hdev); +			mgmt_powered(hdev, 0); +			hci_dev_unlock(hdev); +		}  	}  	/* Controller radio is available but is currently powered down */ -	hdev->amp_status = 0; +	hdev->amp_status = AMP_STATUS_POWERED_DOWN;  	memset(hdev->eir, 0, sizeof(hdev->eir));  	
memset(hdev->dev_class, 0, sizeof(hdev->dev_class)); +	bacpy(&hdev->random_addr, BDADDR_ANY);  	hci_req_unlock(hdev); @@ -1328,11 +2534,17 @@ int hci_dev_close(__u16 dev)  	if (!hdev)  		return -ENODEV; +	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { +		err = -EBUSY; +		goto done; +	} +  	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))  		cancel_delayed_work(&hdev->power_off);  	err = hci_dev_do_close(hdev); +done:  	hci_dev_put(hdev);  	return err;  } @@ -1348,8 +2560,15 @@ int hci_dev_reset(__u16 dev)  	hci_req_lock(hdev); -	if (!test_bit(HCI_UP, &hdev->flags)) +	if (!test_bit(HCI_UP, &hdev->flags)) { +		ret = -ENETDOWN; +		goto done; +	} + +	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { +		ret = -EBUSY;  		goto done; +	}  	/* Drop queues */  	skb_queue_purge(&hdev->rx_q); @@ -1384,10 +2603,15 @@ int hci_dev_reset_stat(__u16 dev)  	if (!hdev)  		return -ENODEV; +	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { +		ret = -EBUSY; +		goto done; +	} +  	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); +done:  	hci_dev_put(hdev); -  	return ret;  } @@ -1404,6 +2628,21 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)  	if (!hdev)  		return -ENODEV; +	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { +		err = -EBUSY; +		goto done; +	} + +	if (hdev->dev_type != HCI_BREDR) { +		err = -EOPNOTSUPP; +		goto done; +	} + +	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) { +		err = -EOPNOTSUPP; +		goto done; +	} +  	switch (cmd) {  	case HCISETAUTH:  		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt, @@ -1462,6 +2701,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)  		break;  	} +done:  	hci_dev_put(hdev);  	return err;  } @@ -1534,7 +2774,7 @@ int hci_get_dev_info(void __user *arg)  	strcpy(di.name, hdev->name);  	di.bdaddr   = hdev->bdaddr; -	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4); +	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);  	di.flags    = hdev->flags;  	di.pkt_type = hdev->pkt_type; 
 	if (lmp_bredr_capable(hdev)) { @@ -1570,6 +2810,9 @@ static int hci_rfkill_set_block(void *data, bool blocked)  	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked); +	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) +		return -EBUSY; +  	if (blocked) {  		set_bit(HCI_RFKILLED, &hdev->dev_flags);  		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) @@ -1592,13 +2835,20 @@ static void hci_power_on(struct work_struct *work)  	BT_DBG("%s", hdev->name); -	err = hci_dev_open(hdev->id); +	err = hci_dev_do_open(hdev);  	if (err < 0) {  		mgmt_set_powered_failed(hdev, err);  		return;  	} -	if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) { +	/* During the HCI setup phase, a few error conditions are +	 * ignored and they need to be checked now. If they are still +	 * valid, it is important to turn the device back off. +	 */ +	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) || +	    (hdev->dev_type == HCI_BREDR && +	     !bacmp(&hdev->bdaddr, BDADDR_ANY) && +	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {  		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);  		hci_dev_do_close(hdev);  	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) { @@ -1623,22 +2873,15 @@ static void hci_power_off(struct work_struct *work)  static void hci_discov_off(struct work_struct *work)  {  	struct hci_dev *hdev; -	u8 scan = SCAN_PAGE;  	hdev = container_of(work, struct hci_dev, discov_off.work);  	BT_DBG("%s", hdev->name); -	hci_dev_lock(hdev); - -	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan); - -	hdev->discov_timeout = 0; - -	hci_dev_unlock(hdev); +	mgmt_discoverable_timeout(hdev);  } -int hci_uuids_clear(struct hci_dev *hdev) +void hci_uuids_clear(struct hci_dev *hdev)  {  	struct bt_uuid *uuid, *tmp; @@ -1646,11 +2889,9 @@ int hci_uuids_clear(struct hci_dev *hdev)  		list_del(&uuid->list);  		kfree(uuid);  	} - -	return 0;  } -int hci_link_keys_clear(struct hci_dev *hdev) +void hci_link_keys_clear(struct hci_dev *hdev)  {  	struct list_head *p, *n; @@ -1662,11 +2903,9 @@ int 
hci_link_keys_clear(struct hci_dev *hdev)  		list_del(p);  		kfree(key);  	} - -	return 0;  } -int hci_smp_ltks_clear(struct hci_dev *hdev) +void hci_smp_ltks_clear(struct hci_dev *hdev)  {  	struct smp_ltk *k, *tmp; @@ -1674,8 +2913,16 @@ int hci_smp_ltks_clear(struct hci_dev *hdev)  		list_del(&k->list);  		kfree(k);  	} +} -	return 0; +void hci_smp_irks_clear(struct hci_dev *hdev) +{ +	struct smp_irk *k, *tmp; + +	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) { +		list_del(&k->list); +		kfree(k); +	}  }  struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) @@ -1725,13 +2972,24 @@ static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,  	return false;  } -struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8]) +static bool ltk_type_master(u8 type) +{ +	if (type == HCI_SMP_STK || type == HCI_SMP_LTK) +		return true; + +	return false; +} + +struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand, +			     bool master)  {  	struct smp_ltk *k;  	list_for_each_entry(k, &hdev->long_term_keys, list) { -		if (k->ediv != ediv || -		    memcmp(rand, k->rand, sizeof(k->rand))) +		if (k->ediv != ediv || k->rand != rand) +			continue; + +		if (ltk_type_master(k->type) != master)  			continue;  		return k; @@ -1741,18 +2999,56 @@ struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])  }  struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, -				     u8 addr_type) +				     u8 addr_type, bool master)  {  	struct smp_ltk *k;  	list_for_each_entry(k, &hdev->long_term_keys, list)  		if (addr_type == k->bdaddr_type && -		    bacmp(bdaddr, &k->bdaddr) == 0) +		    bacmp(bdaddr, &k->bdaddr) == 0 && +		    ltk_type_master(k->type) == master)  			return k;  	return NULL;  } +struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa) +{ +	struct smp_irk *irk; + +	list_for_each_entry(irk, &hdev->identity_resolving_keys, 
list) { +		if (!bacmp(&irk->rpa, rpa)) +			return irk; +	} + +	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) { +		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) { +			bacpy(&irk->rpa, rpa); +			return irk; +		} +	} + +	return NULL; +} + +struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, +				     u8 addr_type) +{ +	struct smp_irk *irk; + +	/* Identity Address must be public or static random */ +	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0) +		return NULL; + +	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) { +		if (addr_type == irk->addr_type && +		    bacmp(bdaddr, &irk->bdaddr) == 0) +			return irk; +	} + +	return NULL; +} +  int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,  		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)  { @@ -1766,7 +3062,7 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,  		key = old_key;  	} else {  		old_key_type = conn ? 
conn->key_type : 0xff; -		key = kzalloc(sizeof(*key), GFP_ATOMIC); +		key = kzalloc(sizeof(*key), GFP_KERNEL);  		if (!key)  			return -ENOMEM;  		list_add(&key->list, &hdev->link_keys); @@ -1806,22 +3102,20 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,  	return 0;  } -int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type, -		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16 -		ediv, u8 rand[8]) +struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, +			    u8 addr_type, u8 type, u8 authenticated, +			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)  {  	struct smp_ltk *key, *old_key; +	bool master = ltk_type_master(type); -	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK)) -		return 0; - -	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type); +	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);  	if (old_key)  		key = old_key;  	else { -		key = kzalloc(sizeof(*key), GFP_ATOMIC); +		key = kzalloc(sizeof(*key), GFP_KERNEL);  		if (!key) -			return -ENOMEM; +			return NULL;  		list_add(&key->list, &hdev->long_term_keys);  	} @@ -1830,17 +3124,34 @@ int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,  	memcpy(key->val, tk, sizeof(key->val));  	key->authenticated = authenticated;  	key->ediv = ediv; +	key->rand = rand;  	key->enc_size = enc_size;  	key->type = type; -	memcpy(key->rand, rand, sizeof(key->rand)); -	if (!new_key) -		return 0; +	return key; +} + +struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, +			    u8 addr_type, u8 val[16], bdaddr_t *rpa) +{ +	struct smp_irk *irk; -	if (type & HCI_SMP_LTK) -		mgmt_new_ltk(hdev, key, 1); +	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type); +	if (!irk) { +		irk = kzalloc(sizeof(*irk), GFP_KERNEL); +		if (!irk) +			return NULL; -	return 0; +		bacpy(&irk->bdaddr, bdaddr); +		irk->addr_type = addr_type; + +		list_add(&irk->list, &hdev->identity_resolving_keys); +	} + +	
memcpy(irk->val, val, 16); +	bacpy(&irk->rpa, rpa); + +	return irk;  }  int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) @@ -1859,21 +3170,38 @@ int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)  	return 0;  } -int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr) +int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)  {  	struct smp_ltk *k, *tmp; +	int removed = 0;  	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) { -		if (bacmp(bdaddr, &k->bdaddr)) +		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)  			continue;  		BT_DBG("%s removing %pMR", hdev->name, bdaddr);  		list_del(&k->list);  		kfree(k); +		removed++;  	} -	return 0; +	return removed ? 0 : -ENOENT; +} + +void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) +{ +	struct smp_irk *k, *tmp; + +	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) { +		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type) +			continue; + +		BT_DBG("%s removing %pMR", hdev->name, bdaddr); + +		list_del(&k->list); +		kfree(k); +	}  }  /* HCI command timer function */ @@ -1922,7 +3250,7 @@ int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)  	return 0;  } -int hci_remote_oob_data_clear(struct hci_dev *hdev) +void hci_remote_oob_data_clear(struct hci_dev *hdev)  {  	struct oob_data *data, *n; @@ -1930,19 +3258,43 @@ int hci_remote_oob_data_clear(struct hci_dev *hdev)  		list_del(&data->list);  		kfree(data);  	} +} + +int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, +			    u8 *hash, u8 *randomizer) +{ +	struct oob_data *data; + +	data = hci_find_remote_oob_data(hdev, bdaddr); +	if (!data) { +		data = kmalloc(sizeof(*data), GFP_KERNEL); +		if (!data) +			return -ENOMEM; + +		bacpy(&data->bdaddr, bdaddr); +		list_add(&data->list, &hdev->remote_oob_data); +	} + +	memcpy(data->hash192, hash, sizeof(data->hash192)); +	memcpy(data->randomizer192, randomizer, 
sizeof(data->randomizer192)); + +	memset(data->hash256, 0, sizeof(data->hash256)); +	memset(data->randomizer256, 0, sizeof(data->randomizer256)); + +	BT_DBG("%s for %pMR", hdev->name, bdaddr);  	return 0;  } -int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash, -			    u8 *randomizer) +int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr, +				u8 *hash192, u8 *randomizer192, +				u8 *hash256, u8 *randomizer256)  {  	struct oob_data *data;  	data = hci_find_remote_oob_data(hdev, bdaddr); -  	if (!data) { -		data = kmalloc(sizeof(*data), GFP_ATOMIC); +		data = kmalloc(sizeof(*data), GFP_KERNEL);  		if (!data)  			return -ENOMEM; @@ -1950,49 +3302,50 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,  		list_add(&data->list, &hdev->remote_oob_data);  	} -	memcpy(data->hash, hash, sizeof(data->hash)); -	memcpy(data->randomizer, randomizer, sizeof(data->randomizer)); +	memcpy(data->hash192, hash192, sizeof(data->hash192)); +	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192)); + +	memcpy(data->hash256, hash256, sizeof(data->hash256)); +	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));  	BT_DBG("%s for %pMR", hdev->name, bdaddr);  	return 0;  } -struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) +struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, +					 bdaddr_t *bdaddr, u8 type)  {  	struct bdaddr_list *b; -	list_for_each_entry(b, &hdev->blacklist, list) -		if (bacmp(bdaddr, &b->bdaddr) == 0) +	list_for_each_entry(b, &hdev->blacklist, list) { +		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)  			return b; +	}  	return NULL;  } -int hci_blacklist_clear(struct hci_dev *hdev) +static void hci_blacklist_clear(struct hci_dev *hdev)  {  	struct list_head *p, *n;  	list_for_each_safe(p, n, &hdev->blacklist) { -		struct bdaddr_list *b; - -		b = list_entry(p, struct bdaddr_list, list); +		struct bdaddr_list *b = 
list_entry(p, struct bdaddr_list, list);  		list_del(p);  		kfree(b);  	} - -	return 0;  }  int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)  {  	struct bdaddr_list *entry; -	if (bacmp(bdaddr, BDADDR_ANY) == 0) +	if (!bacmp(bdaddr, BDADDR_ANY))  		return -EBADF; -	if (hci_blacklist_lookup(hdev, bdaddr)) +	if (hci_blacklist_lookup(hdev, bdaddr, type))  		return -EEXIST;  	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL); @@ -2000,6 +3353,7 @@ int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)  		return -ENOMEM;  	bacpy(&entry->bdaddr, bdaddr); +	entry->bdaddr_type = type;  	list_add(&entry->list, &hdev->blacklist); @@ -2010,10 +3364,12 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)  {  	struct bdaddr_list *entry; -	if (bacmp(bdaddr, BDADDR_ANY) == 0) -		return hci_blacklist_clear(hdev); +	if (!bacmp(bdaddr, BDADDR_ANY)) { +		hci_blacklist_clear(hdev); +		return 0; +	} -	entry = hci_blacklist_lookup(hdev, bdaddr); +	entry = hci_blacklist_lookup(hdev, bdaddr, type);  	if (!entry)  		return -ENOENT; @@ -2023,6 +3379,262 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)  	return mgmt_device_unblocked(hdev, bdaddr, type);  } +struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev, +					  bdaddr_t *bdaddr, u8 type) +{ +	struct bdaddr_list *b; + +	list_for_each_entry(b, &hdev->le_white_list, list) { +		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) +			return b; +	} + +	return NULL; +} + +void hci_white_list_clear(struct hci_dev *hdev) +{ +	struct list_head *p, *n; + +	list_for_each_safe(p, n, &hdev->le_white_list) { +		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list); + +		list_del(p); +		kfree(b); +	} +} + +int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) +{ +	struct bdaddr_list *entry; + +	if (!bacmp(bdaddr, BDADDR_ANY)) +		return -EBADF; + +	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL); +	if (!entry) +		
return -ENOMEM; + +	bacpy(&entry->bdaddr, bdaddr); +	entry->bdaddr_type = type; + +	list_add(&entry->list, &hdev->le_white_list); + +	return 0; +} + +int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) +{ +	struct bdaddr_list *entry; + +	if (!bacmp(bdaddr, BDADDR_ANY)) +		return -EBADF; + +	entry = hci_white_list_lookup(hdev, bdaddr, type); +	if (!entry) +		return -ENOENT; + +	list_del(&entry->list); +	kfree(entry); + +	return 0; +} + +/* This function requires the caller holds hdev->lock */ +struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, +					       bdaddr_t *addr, u8 addr_type) +{ +	struct hci_conn_params *params; + +	list_for_each_entry(params, &hdev->le_conn_params, list) { +		if (bacmp(¶ms->addr, addr) == 0 && +		    params->addr_type == addr_type) { +			return params; +		} +	} + +	return NULL; +} + +static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type) +{ +	struct hci_conn *conn; + +	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr); +	if (!conn) +		return false; + +	if (conn->dst_type != type) +		return false; + +	if (conn->state != BT_CONNECTED) +		return false; + +	return true; +} + +static bool is_identity_address(bdaddr_t *addr, u8 addr_type) +{ +	if (addr_type == ADDR_LE_DEV_PUBLIC) +		return true; + +	/* Check for Random Static address type */ +	if ((addr->b[5] & 0xc0) == 0xc0) +		return true; + +	return false; +} + +/* This function requires the caller holds hdev->lock */ +int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type, +			u8 auto_connect, u16 conn_min_interval, +			u16 conn_max_interval) +{ +	struct hci_conn_params *params; + +	if (!is_identity_address(addr, addr_type)) +		return -EINVAL; + +	params = hci_conn_params_lookup(hdev, addr, addr_type); +	if (params) +		goto update; + +	params = kzalloc(sizeof(*params), GFP_KERNEL); +	if (!params) { +		BT_ERR("Out of memory"); +		return -ENOMEM; +	} + +	bacpy(¶ms->addr, addr); +	params->addr_type = addr_type; + +	
list_add(¶ms->list, &hdev->le_conn_params); + +update: +	params->conn_min_interval = conn_min_interval; +	params->conn_max_interval = conn_max_interval; +	params->auto_connect = auto_connect; + +	switch (auto_connect) { +	case HCI_AUTO_CONN_DISABLED: +	case HCI_AUTO_CONN_LINK_LOSS: +		hci_pend_le_conn_del(hdev, addr, addr_type); +		break; +	case HCI_AUTO_CONN_ALWAYS: +		if (!is_connected(hdev, addr, addr_type)) +			hci_pend_le_conn_add(hdev, addr, addr_type); +		break; +	} + +	BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x " +	       "conn_max_interval 0x%.4x", addr, addr_type, auto_connect, +	       conn_min_interval, conn_max_interval); + +	return 0; +} + +/* This function requires the caller holds hdev->lock */ +void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) +{ +	struct hci_conn_params *params; + +	params = hci_conn_params_lookup(hdev, addr, addr_type); +	if (!params) +		return; + +	hci_pend_le_conn_del(hdev, addr, addr_type); + +	list_del(¶ms->list); +	kfree(params); + +	BT_DBG("addr %pMR (type %u)", addr, addr_type); +} + +/* This function requires the caller holds hdev->lock */ +void hci_conn_params_clear(struct hci_dev *hdev) +{ +	struct hci_conn_params *params, *tmp; + +	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) { +		list_del(¶ms->list); +		kfree(params); +	} + +	BT_DBG("All LE connection parameters were removed"); +} + +/* This function requires the caller holds hdev->lock */ +struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev, +					    bdaddr_t *addr, u8 addr_type) +{ +	struct bdaddr_list *entry; + +	list_for_each_entry(entry, &hdev->pend_le_conns, list) { +		if (bacmp(&entry->bdaddr, addr) == 0 && +		    entry->bdaddr_type == addr_type) +			return entry; +	} + +	return NULL; +} + +/* This function requires the caller holds hdev->lock */ +void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) +{ +	struct bdaddr_list *entry; + +	entry = 
hci_pend_le_conn_lookup(hdev, addr, addr_type); +	if (entry) +		goto done; + +	entry = kzalloc(sizeof(*entry), GFP_KERNEL); +	if (!entry) { +		BT_ERR("Out of memory"); +		return; +	} + +	bacpy(&entry->bdaddr, addr); +	entry->bdaddr_type = addr_type; + +	list_add(&entry->list, &hdev->pend_le_conns); + +	BT_DBG("addr %pMR (type %u)", addr, addr_type); + +done: +	hci_update_background_scan(hdev); +} + +/* This function requires the caller holds hdev->lock */ +void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) +{ +	struct bdaddr_list *entry; + +	entry = hci_pend_le_conn_lookup(hdev, addr, addr_type); +	if (!entry) +		goto done; + +	list_del(&entry->list); +	kfree(entry); + +	BT_DBG("addr %pMR (type %u)", addr, addr_type); + +done: +	hci_update_background_scan(hdev); +} + +/* This function requires the caller holds hdev->lock */ +void hci_pend_le_conns_clear(struct hci_dev *hdev) +{ +	struct bdaddr_list *entry, *tmp; + +	list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) { +		list_del(&entry->list); +		kfree(entry); +	} + +	BT_DBG("All LE pending connections cleared"); +} +  static void inquiry_complete(struct hci_dev *hdev, u8 status)  {  	if (status) { @@ -2082,7 +3694,6 @@ static void le_scan_disable_work(struct work_struct *work)  {  	struct hci_dev *hdev = container_of(work, struct hci_dev,  					    le_scan_disable.work); -	struct hci_cp_le_set_scan_enable cp;  	struct hci_request req;  	int err; @@ -2090,15 +3701,128 @@ static void le_scan_disable_work(struct work_struct *work)  	hci_req_init(&req, hdev); -	memset(&cp, 0, sizeof(cp)); -	cp.enable = LE_SCAN_DISABLE; -	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); +	hci_req_add_le_scan_disable(&req);  	err = hci_req_run(&req, le_scan_disable_work_complete);  	if (err)  		BT_ERR("Disable LE scanning request failed: err %d", err);  } +static void set_random_addr(struct hci_request *req, bdaddr_t *rpa) +{ +	struct hci_dev *hdev = req->hdev; + +	/* If we're 
advertising or initiating an LE connection we can't +	 * go ahead and change the random address at this time. This is +	 * because the eventual initiator address used for the +	 * subsequently created connection will be undefined (some +	 * controllers use the new address and others the one we had +	 * when the operation started). +	 * +	 * In this kind of scenario skip the update and let the random +	 * address be updated at the next cycle. +	 */ +	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) || +	    hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) { +		BT_DBG("Deferring random address update"); +		return; +	} + +	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa); +} + +int hci_update_random_address(struct hci_request *req, bool require_privacy, +			      u8 *own_addr_type) +{ +	struct hci_dev *hdev = req->hdev; +	int err; + +	/* If privacy is enabled use a resolvable private address. If +	 * current RPA has expired or there is something else than +	 * the current RPA in use, then generate a new one. +	 */ +	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) { +		int to; + +		*own_addr_type = ADDR_LE_DEV_RANDOM; + +		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) && +		    !bacmp(&hdev->random_addr, &hdev->rpa)) +			return 0; + +		err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa); +		if (err < 0) { +			BT_ERR("%s failed to generate new RPA", hdev->name); +			return err; +		} + +		set_random_addr(req, &hdev->rpa); + +		to = msecs_to_jiffies(hdev->rpa_timeout * 1000); +		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to); + +		return 0; +	} + +	/* In case of required privacy without resolvable private address, +	 * use an unresolvable private address. This is useful for active +	 * scanning and non-connectable advertising. 
+	 */ +	if (require_privacy) { +		bdaddr_t urpa; + +		get_random_bytes(&urpa, 6); +		urpa.b[5] &= 0x3f;	/* Clear two most significant bits */ + +		*own_addr_type = ADDR_LE_DEV_RANDOM; +		set_random_addr(req, &urpa); +		return 0; +	} + +	/* If forcing static address is in use or there is no public +	 * address use the static address as random address (but skip +	 * the HCI command if the current random address is already the +	 * static one. +	 */ +	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) || +	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) { +		*own_addr_type = ADDR_LE_DEV_RANDOM; +		if (bacmp(&hdev->static_addr, &hdev->random_addr)) +			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, +				    &hdev->static_addr); +		return 0; +	} + +	/* Neither privacy nor static address is being used so use a +	 * public address. +	 */ +	*own_addr_type = ADDR_LE_DEV_PUBLIC; + +	return 0; +} + +/* Copy the Identity Address of the controller. + * + * If the controller has a public BD_ADDR, then by default use that one. + * If this is a LE only controller without a public address, default to + * the static random address. + * + * For debugging purposes it is possible to force controllers with a + * public address to use the static random address instead. 
+ */ +void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, +			       u8 *bdaddr_type) +{ +	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) || +	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) { +		bacpy(bdaddr, &hdev->static_addr); +		*bdaddr_type = ADDR_LE_DEV_RANDOM; +	} else { +		bacpy(bdaddr, &hdev->bdaddr); +		*bdaddr_type = ADDR_LE_DEV_PUBLIC; +	} +} +  /* Alloc HCI device */  struct hci_dev *hci_alloc_dev(void)  { @@ -2111,13 +3835,25 @@ struct hci_dev *hci_alloc_dev(void)  	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);  	hdev->esco_type = (ESCO_HV1);  	hdev->link_mode = (HCI_LM_ACCEPT); -	hdev->io_capability = 0x03; /* No Input No Output */ +	hdev->num_iac = 0x01;		/* One IAC support is mandatory */ +	hdev->io_capability = 0x03;	/* No Input No Output */  	hdev->inq_tx_power = HCI_TX_POWER_INVALID;  	hdev->adv_tx_power = HCI_TX_POWER_INVALID;  	hdev->sniff_max_interval = 800;  	hdev->sniff_min_interval = 80; +	hdev->le_adv_channel_map = 0x07; +	hdev->le_scan_interval = 0x0060; +	hdev->le_scan_window = 0x0030; +	hdev->le_conn_min_interval = 0x0028; +	hdev->le_conn_max_interval = 0x0038; + +	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT; +	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT; +	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE; +	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE; +  	mutex_init(&hdev->lock);  	mutex_init(&hdev->req_lock); @@ -2126,7 +3862,11 @@ struct hci_dev *hci_alloc_dev(void)  	INIT_LIST_HEAD(&hdev->uuids);  	INIT_LIST_HEAD(&hdev->link_keys);  	INIT_LIST_HEAD(&hdev->long_term_keys); +	INIT_LIST_HEAD(&hdev->identity_resolving_keys);  	INIT_LIST_HEAD(&hdev->remote_oob_data); +	INIT_LIST_HEAD(&hdev->le_white_list); +	INIT_LIST_HEAD(&hdev->le_conn_params); +	INIT_LIST_HEAD(&hdev->pend_le_conns);  	INIT_LIST_HEAD(&hdev->conn_hash.list);  	INIT_WORK(&hdev->rx_work, hci_rx_work); @@ -2206,9 +3946,23 @@ int hci_register_dev(struct hci_dev *hdev)  		goto err;  	} -	error = hci_add_sysfs(hdev); -	if 
(error < 0) +	if (!IS_ERR_OR_NULL(bt_debugfs)) +		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs); + +	dev_set_name(&hdev->dev, "%s", hdev->name); + +	hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, +					       CRYPTO_ALG_ASYNC); +	if (IS_ERR(hdev->tfm_aes)) { +		BT_ERR("Unable to create crypto context"); +		error = PTR_ERR(hdev->tfm_aes); +		hdev->tfm_aes = NULL;  		goto err_wqueue; +	} + +	error = device_add(&hdev->dev); +	if (error < 0) +		goto err_tfm;  	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,  				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, @@ -2224,9 +3978,14 @@ int hci_register_dev(struct hci_dev *hdev)  		set_bit(HCI_RFKILLED, &hdev->dev_flags);  	set_bit(HCI_SETUP, &hdev->dev_flags); +	set_bit(HCI_AUTO_OFF, &hdev->dev_flags); -	if (hdev->dev_type != HCI_AMP) -		set_bit(HCI_AUTO_OFF, &hdev->dev_flags); +	if (hdev->dev_type == HCI_BREDR) { +		/* Assume BR/EDR support until proven otherwise (such as +		 * through reading supported features during init. 
+		 */ +		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags); +	}  	write_lock(&hci_dev_list_lock);  	list_add(&hdev->list, &hci_dev_list); @@ -2239,6 +3998,8 @@ int hci_register_dev(struct hci_dev *hdev)  	return id; +err_tfm: +	crypto_free_blkcipher(hdev->tfm_aes);  err_wqueue:  	destroy_workqueue(hdev->workqueue);  	destroy_workqueue(hdev->req_workqueue); @@ -2289,7 +4050,12 @@ void hci_unregister_dev(struct hci_dev *hdev)  		rfkill_destroy(hdev->rfkill);  	} -	hci_del_sysfs(hdev); +	if (hdev->tfm_aes) +		crypto_free_blkcipher(hdev->tfm_aes); + +	device_del(&hdev->dev); + +	debugfs_remove_recursive(hdev->debugfs);  	destroy_workqueue(hdev->workqueue);  	destroy_workqueue(hdev->req_workqueue); @@ -2299,7 +4065,11 @@ void hci_unregister_dev(struct hci_dev *hdev)  	hci_uuids_clear(hdev);  	hci_link_keys_clear(hdev);  	hci_smp_ltks_clear(hdev); +	hci_smp_irks_clear(hdev);  	hci_remote_oob_data_clear(hdev); +	hci_white_list_clear(hdev); +	hci_conn_params_clear(hdev); +	hci_pend_le_conns_clear(hdev);  	hci_dev_unlock(hdev);  	hci_dev_put(hdev); @@ -2325,9 +4095,8 @@ int hci_resume_dev(struct hci_dev *hdev)  EXPORT_SYMBOL(hci_resume_dev);  /* Receive frame from HCI drivers */ -int hci_recv_frame(struct sk_buff *skb) +int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)  { -	struct hci_dev *hdev = (struct hci_dev *) skb->dev;  	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)  		      && !test_bit(HCI_INIT, &hdev->flags))) {  		kfree_skb(skb); @@ -2386,7 +4155,6 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data,  		scb->expect = hlen;  		scb->pkt_type = type; -		skb->dev = (void *) hdev;  		hdev->reassembly[index] = skb;  	} @@ -2446,7 +4214,7 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data,  			/* Complete frame */  			bt_cb(skb)->pkt_type = type; -			hci_recv_frame(skb); +			hci_recv_frame(hdev, skb);  			hdev->reassembly[index] = NULL;  			return remain; @@ -2537,15 +4305,8 @@ int hci_unregister_cb(struct hci_cb *cb)  }  
EXPORT_SYMBOL(hci_unregister_cb); -static int hci_send_frame(struct sk_buff *skb) +static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)  { -	struct hci_dev *hdev = (struct hci_dev *) skb->dev; - -	if (!hdev) { -		kfree_skb(skb); -		return -ENODEV; -	} -  	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);  	/* Time stamp */ @@ -2562,7 +4323,8 @@ static int hci_send_frame(struct sk_buff *skb)  	/* Get rid of skb owner, prior to sending to the driver. */  	skb_orphan(skb); -	return hdev->send(skb); +	if (hdev->send(hdev, skb) < 0) +		BT_ERR("%s sending frame failed", hdev->name);  }  void hci_req_init(struct hci_request *req, struct hci_dev *hdev) @@ -2625,7 +4387,6 @@ static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,  	BT_DBG("skb len %d", skb->len);  	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; -	skb->dev = (void *) hdev;  	return skb;  } @@ -2769,7 +4530,6 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,  		do {  			skb = list; list = list->next; -			skb->dev = (void *) hdev;  			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;  			hci_add_acl_hdr(skb, conn->handle, flags); @@ -2788,8 +4548,6 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)  	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags); -	skb->dev = (void *) hdev; -  	hci_queue_acl(chan, &chan->data_q, skb, flags);  	queue_work(hdev->workqueue, &hdev->tx_work); @@ -2810,7 +4568,6 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)  	skb_reset_transport_header(skb);  	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE); -	skb->dev = (void *) hdev;  	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;  	skb_queue_tail(&conn->data_q, skb); @@ -3075,7 +4832,7 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev)  			hci_conn_enter_active_mode(chan->conn,  						   bt_cb(skb)->force_active); -			hci_send_frame(skb); +			hci_send_frame(hdev, skb);  			hdev->acl_last_tx = jiffies;  			
hdev->acl_cnt--; @@ -3127,7 +4884,7 @@ static void hci_sched_acl_blk(struct hci_dev *hdev)  			hci_conn_enter_active_mode(chan->conn,  						   bt_cb(skb)->force_active); -			hci_send_frame(skb); +			hci_send_frame(hdev, skb);  			hdev->acl_last_tx = jiffies;  			hdev->block_cnt -= blocks; @@ -3180,7 +4937,7 @@ static void hci_sched_sco(struct hci_dev *hdev)  	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {  		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {  			BT_DBG("skb %p len %d", skb, skb->len); -			hci_send_frame(skb); +			hci_send_frame(hdev, skb);  			conn->sent++;  			if (conn->sent == ~0) @@ -3204,7 +4961,7 @@ static void hci_sched_esco(struct hci_dev *hdev)  						     "e))) {  		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {  			BT_DBG("skb %p len %d", skb, skb->len); -			hci_send_frame(skb); +			hci_send_frame(hdev, skb);  			conn->sent++;  			if (conn->sent == ~0) @@ -3246,7 +5003,7 @@ static void hci_sched_le(struct hci_dev *hdev)  			skb = skb_dequeue(&chan->data_q); -			hci_send_frame(skb); +			hci_send_frame(hdev, skb);  			hdev->le_last_tx = jiffies;  			cnt--; @@ -3272,19 +5029,17 @@ static void hci_tx_work(struct work_struct *work)  	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,  	       hdev->sco_cnt, hdev->le_cnt); -	/* Schedule queues and send stuff to HCI driver */ - -	hci_sched_acl(hdev); - -	hci_sched_sco(hdev); - -	hci_sched_esco(hdev); - -	hci_sched_le(hdev); +	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { +		/* Schedule queues and send stuff to HCI driver */ +		hci_sched_acl(hdev); +		hci_sched_sco(hdev); +		hci_sched_esco(hdev); +		hci_sched_le(hdev); +	}  	/* Send next queued raw (unknown type) packet */  	while ((skb = skb_dequeue(&hdev->raw_q))) -		hci_send_frame(skb); +		hci_send_frame(hdev, skb);  }  /* ----- HCI RX task (incoming data processing) ----- */ @@ -3471,7 +5226,8 @@ static void hci_rx_work(struct work_struct *work)  			hci_send_to_sock(hdev, skb);  		} -		if 
(test_bit(HCI_RAW, &hdev->flags)) { +		if (test_bit(HCI_RAW, &hdev->flags) || +		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {  			kfree_skb(skb);  			continue;  		} @@ -3526,10 +5282,10 @@ static void hci_cmd_work(struct work_struct *work)  		kfree_skb(hdev->sent_cmd); -		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC); +		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);  		if (hdev->sent_cmd) {  			atomic_dec(&hdev->cmd_cnt); -			hci_send_frame(skb); +			hci_send_frame(hdev, skb);  			if (test_bit(HCI_RESET, &hdev->flags))  				del_timer(&hdev->cmd_timer);  			else @@ -3542,14 +5298,103 @@ static void hci_cmd_work(struct work_struct *work)  	}  } -u8 bdaddr_to_le(u8 bdaddr_type) +void hci_req_add_le_scan_disable(struct hci_request *req)  { -	switch (bdaddr_type) { -	case BDADDR_LE_PUBLIC: -		return ADDR_LE_DEV_PUBLIC; +	struct hci_cp_le_set_scan_enable cp; -	default: -		/* Fallback to LE Random address type */ -		return ADDR_LE_DEV_RANDOM; +	memset(&cp, 0, sizeof(cp)); +	cp.enable = LE_SCAN_DISABLE; +	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); +} + +void hci_req_add_le_passive_scan(struct hci_request *req) +{ +	struct hci_cp_le_set_scan_param param_cp; +	struct hci_cp_le_set_scan_enable enable_cp; +	struct hci_dev *hdev = req->hdev; +	u8 own_addr_type; + +	/* Set require_privacy to true to avoid identification from +	 * unknown peer devices. Since this is passive scanning, no +	 * SCAN_REQ using the local identity should be sent. Mandating +	 * privacy is just an extra precaution. 
+	 */ +	if (hci_update_random_address(req, true, &own_addr_type)) +		return; + +	memset(¶m_cp, 0, sizeof(param_cp)); +	param_cp.type = LE_SCAN_PASSIVE; +	param_cp.interval = cpu_to_le16(hdev->le_scan_interval); +	param_cp.window = cpu_to_le16(hdev->le_scan_window); +	param_cp.own_address_type = own_addr_type; +	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), +		    ¶m_cp); + +	memset(&enable_cp, 0, sizeof(enable_cp)); +	enable_cp.enable = LE_SCAN_ENABLE; +	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; +	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp), +		    &enable_cp); +} + +static void update_background_scan_complete(struct hci_dev *hdev, u8 status) +{ +	if (status) +		BT_DBG("HCI request failed to update background scanning: " +		       "status 0x%2.2x", status); +} + +/* This function controls the background scanning based on hdev->pend_le_conns + * list. If there are pending LE connection we start the background scanning, + * otherwise we stop it. + * + * This function requires the caller holds hdev->lock. + */ +void hci_update_background_scan(struct hci_dev *hdev) +{ +	struct hci_request req; +	struct hci_conn *conn; +	int err; + +	hci_req_init(&req, hdev); + +	if (list_empty(&hdev->pend_le_conns)) { +		/* If there is no pending LE connections, we should stop +		 * the background scanning. +		 */ + +		/* If controller is not scanning we are done. */ +		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags)) +			return; + +		hci_req_add_le_scan_disable(&req); + +		BT_DBG("%s stopping background scanning", hdev->name); +	} else { +		/* If there is at least one pending LE connection, we should +		 * keep the background scan running. +		 */ + +		/* If controller is connecting, we should not start scanning +		 * since some controllers are not able to scan and connect at +		 * the same time. 
+		 */ +		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); +		if (conn) +			return; + +		/* If controller is currently scanning, we stop it to ensure we +		 * don't miss any advertising (due to duplicates filter). +		 */ +		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) +			hci_req_add_le_scan_disable(&req); + +		hci_req_add_le_passive_scan(&req); + +		BT_DBG("%s starting background scanning", hdev->name);  	} + +	err = hci_req_run(&req, update_background_scan_complete); +	if (err) +		BT_ERR("Failed to run HCI request: err %d", err);  }  | 
