Diffstat (limited to 'drivers/net/wireless/ath/ath10k')
27 files changed, 8921 insertions, 3534 deletions
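
Note before the hunks: the largest recurring changes below are a rework of the BMI and Copy Engine APIs. ath10k_bmi_execute() no longer reuses a single in/out pointer; it takes the parameter by value and returns the firmware's result through a separate out-pointer. The ce_state/ce_ring_state structures are renamed to ath10k_ce_pipe/ath10k_ce_ring, and pci_alloc_consistent()/pci_free_consistent() give way to dma_alloc_coherent()/dma_free_coherent(). As a reading aid, here is a minimal sketch of a caller updated for the new ath10k_bmi_execute() signature; it mirrors the ath10k_download_and_run_otp() hunk further down and assumes the usual ath10k driver context ('ar', 'address' and the ath10k_err/ath10k_bmi_execute declarations from the driver headers), so it is illustrative only, not an additional patch hunk:

	/* New-style BMI execute: the input is passed by value and the
	 * firmware's result comes back via a separate out-pointer, so a
	 * failed exchange can no longer clobber the caller's input.
	 */
	u32 result;
	int ret;

	ret = ath10k_bmi_execute(ar, address, 0, &result);
	if (ret) {
		ath10k_err("could not execute otp (%d)\n", ret);
		return ret;
	}

	/* a non-zero result now unambiguously means a firmware-side failure */
	if (result != 0) {
		ath10k_err("otp calibration failed: %d", result);
		return -EINVAL;
	}
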
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig index 82e8088ca9b..a6f5285235a 100644 --- a/drivers/net/wireless/ath/ath10k/Kconfig +++ b/drivers/net/wireless/ath/ath10k/Kconfig @@ -37,3 +37,10 @@ config ATH10K_TRACING  	---help---  	  Select this to ath10k use tracing infrastructure. +config ATH10K_DFS_CERTIFIED +	bool "Atheros DFS support for certified platforms" +	depends on ATH10K && CFG80211_CERTIFICATION_ONUS +	default n +	---help--- +	This option enables DFS support for initiating radiation on +	ath10k. diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c index 744da6d1c40..17d221abd58 100644 --- a/drivers/net/wireless/ath/ath10k/bmi.c +++ b/drivers/net/wireless/ath/ath10k/bmi.c @@ -22,7 +22,8 @@  void ath10k_bmi_start(struct ath10k *ar)  { -	ath10k_dbg(ATH10K_DBG_CORE, "BMI started\n"); +	ath10k_dbg(ATH10K_DBG_BMI, "bmi start\n"); +  	ar->bmi.done_sent = false;  } @@ -32,8 +33,10 @@ int ath10k_bmi_done(struct ath10k *ar)  	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);  	int ret; +	ath10k_dbg(ATH10K_DBG_BMI, "bmi done\n"); +  	if (ar->bmi.done_sent) { -		ath10k_dbg(ATH10K_DBG_CORE, "%s skipped\n", __func__); +		ath10k_dbg(ATH10K_DBG_BMI, "bmi skipped\n");  		return 0;  	} @@ -46,7 +49,6 @@ int ath10k_bmi_done(struct ath10k *ar)  		return ret;  	} -	ath10k_dbg(ATH10K_DBG_CORE, "BMI done\n");  	return 0;  } @@ -59,6 +61,8 @@ int ath10k_bmi_get_target_info(struct ath10k *ar,  	u32 resplen = sizeof(resp.get_target_info);  	int ret; +	ath10k_dbg(ATH10K_DBG_BMI, "bmi get target info\n"); +  	if (ar->bmi.done_sent) {  		ath10k_warn("BMI Get Target Info Command disallowed\n");  		return -EBUSY; @@ -80,6 +84,7 @@ int ath10k_bmi_get_target_info(struct ath10k *ar,  	target_info->version = __le32_to_cpu(resp.get_target_info.version);  	target_info->type    = __le32_to_cpu(resp.get_target_info.type); +  	return 0;  } @@ -92,15 +97,14 @@ int ath10k_bmi_read_memory(struct ath10k *ar,  	u32 rxlen;  	int ret; +	ath10k_dbg(ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n", +		   address, length); +  	if (ar->bmi.done_sent) {  		ath10k_warn("command disallowed\n");  		return -EBUSY;  	} -	ath10k_dbg(ATH10K_DBG_CORE, -		   "%s: (device: 0x%p, address: 0x%x, length: %d)\n", -		   __func__, ar, address, length); -  	while (length) {  		rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE); @@ -133,15 +137,14 @@ int ath10k_bmi_write_memory(struct ath10k *ar,  	u32 txlen;  	int ret; +	ath10k_dbg(ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n", +		   address, length); +  	if (ar->bmi.done_sent) {  		ath10k_warn("command disallowed\n");  		return -EBUSY;  	} -	ath10k_dbg(ATH10K_DBG_CORE, -		   "%s: (device: 0x%p, address: 0x%x, length: %d)\n", -		   __func__, ar, address, length); -  	while (length) {  		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen); @@ -172,7 +175,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar,  	return 0;  } -int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param) +int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)  {  	struct bmi_cmd cmd;  	union bmi_resp resp; @@ -180,18 +183,17 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)  	u32 resplen = sizeof(resp.execute);  	int ret; +	ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n", +		   address, param); +  	if (ar->bmi.done_sent) {  		ath10k_warn("command disallowed\n");  		return -EBUSY;  	} -	ath10k_dbg(ATH10K_DBG_CORE, -		   "%s: (device: 0x%p, address: 0x%x, param: 
%d)\n", -		   __func__, ar, address, *param); -  	cmd.id            = __cpu_to_le32(BMI_EXECUTE);  	cmd.execute.addr  = __cpu_to_le32(address); -	cmd.execute.param = __cpu_to_le32(*param); +	cmd.execute.param = __cpu_to_le32(param);  	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);  	if (ret) { @@ -202,10 +204,13 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)  	if (resplen < sizeof(resp.execute)) {  		ath10k_warn("invalid execute response length (%d)\n",  			    resplen); -		return ret; +		return -EIO;  	} -	*param = __le32_to_cpu(resp.execute.result); +	*result = __le32_to_cpu(resp.execute.result); + +	ath10k_dbg(ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result); +  	return 0;  } @@ -216,6 +221,9 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)  	u32 txlen;  	int ret; +	ath10k_dbg(ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n", +		   buffer, length); +  	if (ar->bmi.done_sent) {  		ath10k_warn("command disallowed\n");  		return -EBUSY; @@ -250,6 +258,9 @@ int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)  	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);  	int ret; +	ath10k_dbg(ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n", +		   address); +  	if (ar->bmi.done_sent) {  		ath10k_warn("command disallowed\n");  		return -EBUSY; @@ -275,6 +286,10 @@ int ath10k_bmi_fast_download(struct ath10k *ar,  	u32 trailer_len = length - head_len;  	int ret; +	ath10k_dbg(ATH10K_DBG_BMI, +		   "bmi fast download address 0x%x buffer 0x%p length %d\n", +		   address, buffer, length); +  	ret = ath10k_bmi_lz_stream_start(ar, address);  	if (ret)  		return ret; diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h index 8d81ce1cec2..111ab701465 100644 --- a/drivers/net/wireless/ath/ath10k/bmi.h +++ b/drivers/net/wireless/ath/ath10k/bmi.h @@ -201,7 +201,8 @@ int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,  									\  		addr = host_interest_item_address(HI_ITEM(item));	\  		ret = ath10k_bmi_read_memory(ar, addr, (u8 *)&tmp, 4); \ -		*val = __le32_to_cpu(tmp);				\ +		if (!ret)						\ +			*val = __le32_to_cpu(tmp);			\  		ret;							\  	 }) @@ -217,7 +218,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,  		ret;							\  	}) -int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param); +int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result);  int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address);  int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length);  int ath10k_bmi_fast_download(struct ath10k *ar, u32 address, diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index f8b969f518f..d185dc0cd12 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -76,36 +76,7 @@ static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,  						      u32 ce_ctrl_addr,  						      unsigned int n)  { -	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); -	void __iomem *indicator_addr; - -	if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) { -		ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n); -		return; -	} - -	/* workaround for QCA988x_1.0 HW CE */ -	indicator_addr = ar_pci->mem + ce_ctrl_addr + DST_WATERMARK_ADDRESS; - -	if (ce_ctrl_addr == ath10k_ce_base_address(CDC_WAR_DATA_CE)) { -		iowrite32((CDC_WAR_MAGIC_STR | n), indicator_addr); -	} else { -		unsigned long irq_flags; -		
local_irq_save(irq_flags); -		iowrite32(1, indicator_addr); - -		/* -		 * PCIE write waits for ACK in IPQ8K, there is no -		 * need to read back value. -		 */ -		(void)ioread32(indicator_addr); -		(void)ioread32(indicator_addr); /* conservative */ - -		ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n); - -		iowrite32(0, indicator_addr); -		local_irq_restore(irq_flags); -	} +	ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);  }  static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar, @@ -272,6 +243,16 @@ static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,  			   misc_ie_addr | CE_ERROR_MASK);  } +static inline void ath10k_ce_error_intr_disable(struct ath10k *ar, +						u32 ce_ctrl_addr) +{ +	u32 misc_ie_addr = ath10k_pci_read32(ar, +					     ce_ctrl_addr + MISC_IE_ADDRESS); + +	ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS, +			   misc_ie_addr & ~CE_ERROR_MASK); +} +  static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,  						     u32 ce_ctrl_addr,  						     unsigned int mask) @@ -285,15 +266,15 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,   * ath10k_ce_sendlist_send.   * The caller takes responsibility for any needed locking.   */ -static int ath10k_ce_send_nolock(struct ce_state *ce_state, -				 void *per_transfer_context, -				 u32 buffer, -				 unsigned int nbytes, -				 unsigned int transfer_id, -				 unsigned int flags) +int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, +			  void *per_transfer_context, +			  u32 buffer, +			  unsigned int nbytes, +			  unsigned int transfer_id, +			  unsigned int flags)  {  	struct ath10k *ar = ce_state->ar; -	struct ce_ring_state *src_ring = ce_state->src_ring; +	struct ath10k_ce_ring *src_ring = ce_state->src_ring;  	struct ce_desc *desc, *sdesc;  	unsigned int nentries_mask = src_ring->nentries_mask;  	unsigned int sw_index = src_ring->sw_index; @@ -306,11 +287,13 @@ static int ath10k_ce_send_nolock(struct ce_state *ce_state,  		ath10k_warn("%s: send more we can (nbytes: %d, max: %d)\n",  			    __func__, nbytes, ce_state->src_sz_max); -	ath10k_pci_wake(ar); +	ret = ath10k_pci_wake(ar); +	if (ret) +		return ret;  	if (unlikely(CE_RING_DELTA(nentries_mask,  				   write_index, sw_index - 1) <= 0)) { -		ret = -EIO; +		ret = -ENOSR;  		goto exit;  	} @@ -346,7 +329,34 @@ exit:  	return ret;  } -int ath10k_ce_send(struct ce_state *ce_state, +void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe) +{ +	struct ath10k *ar = pipe->ar; +	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); +	struct ath10k_ce_ring *src_ring = pipe->src_ring; +	u32 ctrl_addr = pipe->ctrl_addr; + +	lockdep_assert_held(&ar_pci->ce_lock); + +	/* +	 * This function must be called only if there is an incomplete +	 * scatter-gather transfer (before index register is updated) +	 * that needs to be cleaned up. 
+	 */ +	if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index)) +		return; + +	if (WARN_ON_ONCE(src_ring->write_index == +			 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr))) +		return; + +	src_ring->write_index--; +	src_ring->write_index &= src_ring->nentries_mask; + +	src_ring->per_transfer_context[src_ring->write_index] = NULL; +} + +int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,  		   void *per_transfer_context,  		   u32 buffer,  		   unsigned int nbytes, @@ -365,77 +375,26 @@ int ath10k_ce_send(struct ce_state *ce_state,  	return ret;  } -void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, u32 buffer, -				unsigned int nbytes, u32 flags) -{ -	unsigned int num_items = sendlist->num_items; -	struct ce_sendlist_item *item; - -	item = &sendlist->item[num_items]; -	item->data = buffer; -	item->u.nbytes = nbytes; -	item->flags = flags; -	sendlist->num_items++; -} - -int ath10k_ce_sendlist_send(struct ce_state *ce_state, -			    void *per_transfer_context, -			    struct ce_sendlist *sendlist, -			    unsigned int transfer_id) +int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)  { -	struct ce_ring_state *src_ring = ce_state->src_ring; -	struct ce_sendlist_item *item; -	struct ath10k *ar = ce_state->ar; +	struct ath10k *ar = pipe->ar;  	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); -	unsigned int nentries_mask = src_ring->nentries_mask; -	unsigned int num_items = sendlist->num_items; -	unsigned int sw_index; -	unsigned int write_index; -	int i, delta, ret = -ENOMEM; +	int delta;  	spin_lock_bh(&ar_pci->ce_lock); - -	sw_index = src_ring->sw_index; -	write_index = src_ring->write_index; - -	delta = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1); - -	if (delta >= num_items) { -		/* -		 * Handle all but the last item uniformly. -		 */ -		for (i = 0; i < num_items - 1; i++) { -			item = &sendlist->item[i]; -			ret = ath10k_ce_send_nolock(ce_state, -						    CE_SENDLIST_ITEM_CTXT, -						    (u32) item->data, -						    item->u.nbytes, transfer_id, -						    item->flags | -						    CE_SEND_FLAG_GATHER); -			if (ret) -				ath10k_warn("CE send failed for item: %d\n", i); -		} -		/* -		 * Provide valid context pointer for final item. 
-		 */ -		item = &sendlist->item[i]; -		ret = ath10k_ce_send_nolock(ce_state, per_transfer_context, -					    (u32) item->data, item->u.nbytes, -					    transfer_id, item->flags); -		if (ret) -			ath10k_warn("CE send failed for last item: %d\n", i); -	} - +	delta = CE_RING_DELTA(pipe->src_ring->nentries_mask, +			      pipe->src_ring->write_index, +			      pipe->src_ring->sw_index - 1);  	spin_unlock_bh(&ar_pci->ce_lock); -	return ret; +	return delta;  } -int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state, +int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,  			       void *per_recv_context,  			       u32 buffer)  { -	struct ce_ring_state *dest_ring = ce_state->dest_ring; +	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;  	u32 ctrl_addr = ce_state->ctrl_addr;  	struct ath10k *ar = ce_state->ar;  	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); @@ -448,7 +407,9 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,  	write_index = dest_ring->write_index;  	sw_index = dest_ring->sw_index; -	ath10k_pci_wake(ar); +	ret = ath10k_pci_wake(ar); +	if (ret) +		goto out;  	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {  		struct ce_desc *base = dest_ring->base_addr_owner_space; @@ -470,6 +431,8 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,  		ret = -EIO;  	}  	ath10k_pci_sleep(ar); + +out:  	spin_unlock_bh(&ar_pci->ce_lock);  	return ret; @@ -479,14 +442,14 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,   * Guts of ath10k_ce_completed_recv_next.   * The caller takes responsibility for any necessary locking.   */ -static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state, +static int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,  						void **per_transfer_contextp,  						u32 *bufferp,  						unsigned int *nbytesp,  						unsigned int *transfer_idp,  						unsigned int *flagsp)  { -	struct ce_ring_state *dest_ring = ce_state->dest_ring; +	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;  	unsigned int nentries_mask = dest_ring->nentries_mask;  	unsigned int sw_index = dest_ring->sw_index; @@ -535,7 +498,7 @@ static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state,  	return 0;  } -int ath10k_ce_completed_recv_next(struct ce_state *ce_state, +int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,  				  void **per_transfer_contextp,  				  u32 *bufferp,  				  unsigned int *nbytesp, @@ -556,11 +519,11 @@ int ath10k_ce_completed_recv_next(struct ce_state *ce_state,  	return ret;  } -int ath10k_ce_revoke_recv_next(struct ce_state *ce_state, +int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,  			       void **per_transfer_contextp,  			       u32 *bufferp)  { -	struct ce_ring_state *dest_ring; +	struct ath10k_ce_ring *dest_ring;  	unsigned int nentries_mask;  	unsigned int sw_index;  	unsigned int write_index; @@ -612,19 +575,20 @@ int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,   * Guts of ath10k_ce_completed_send_next.   * The caller takes responsibility for any necessary locking.   
*/ -static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state, +static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,  						void **per_transfer_contextp,  						u32 *bufferp,  						unsigned int *nbytesp,  						unsigned int *transfer_idp)  { -	struct ce_ring_state *src_ring = ce_state->src_ring; +	struct ath10k_ce_ring *src_ring = ce_state->src_ring;  	u32 ctrl_addr = ce_state->ctrl_addr;  	struct ath10k *ar = ce_state->ar;  	unsigned int nentries_mask = src_ring->nentries_mask;  	unsigned int sw_index = src_ring->sw_index; +	struct ce_desc *sdesc, *sbase;  	unsigned int read_index; -	int ret = -EIO; +	int ret;  	if (src_ring->hw_index == sw_index) {  		/* @@ -634,48 +598,54 @@ static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,  		 * the SW has really caught up to the HW, or if the cached  		 * value of the HW index has become stale.  		 */ -		ath10k_pci_wake(ar); + +		ret = ath10k_pci_wake(ar); +		if (ret) +			return ret; +  		src_ring->hw_index =  			ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);  		src_ring->hw_index &= nentries_mask; +  		ath10k_pci_sleep(ar);  	} +  	read_index = src_ring->hw_index; -	if ((read_index != sw_index) && (read_index != 0xffffffff)) { -		struct ce_desc *sbase = src_ring->shadow_base; -		struct ce_desc *sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index); +	if ((read_index == sw_index) || (read_index == 0xffffffff)) +		return -EIO; -		/* Return data from completed source descriptor */ -		*bufferp = __le32_to_cpu(sdesc->addr); -		*nbytesp = __le16_to_cpu(sdesc->nbytes); -		*transfer_idp = MS(__le16_to_cpu(sdesc->flags), -						CE_DESC_FLAGS_META_DATA); +	sbase = src_ring->shadow_base; +	sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index); -		if (per_transfer_contextp) -			*per_transfer_contextp = -				src_ring->per_transfer_context[sw_index]; +	/* Return data from completed source descriptor */ +	*bufferp = __le32_to_cpu(sdesc->addr); +	*nbytesp = __le16_to_cpu(sdesc->nbytes); +	*transfer_idp = MS(__le16_to_cpu(sdesc->flags), +			   CE_DESC_FLAGS_META_DATA); -		/* sanity */ -		src_ring->per_transfer_context[sw_index] = NULL; +	if (per_transfer_contextp) +		*per_transfer_contextp = +			src_ring->per_transfer_context[sw_index]; -		/* Update sw_index */ -		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); -		src_ring->sw_index = sw_index; -		ret = 0; -	} +	/* sanity */ +	src_ring->per_transfer_context[sw_index] = NULL; -	return ret; +	/* Update sw_index */ +	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); +	src_ring->sw_index = sw_index; + +	return 0;  }  /* NB: Modeled after ath10k_ce_completed_send_next */ -int ath10k_ce_cancel_send_next(struct ce_state *ce_state, +int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,  			       void **per_transfer_contextp,  			       u32 *bufferp,  			       unsigned int *nbytesp,  			       unsigned int *transfer_idp)  { -	struct ce_ring_state *src_ring; +	struct ath10k_ce_ring *src_ring;  	unsigned int nentries_mask;  	unsigned int sw_index;  	unsigned int write_index; @@ -727,7 +697,7 @@ int ath10k_ce_cancel_send_next(struct ce_state *ce_state,  	return ret;  } -int ath10k_ce_completed_send_next(struct ce_state *ce_state, +int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,  				  void **per_transfer_contextp,  				  u32 *bufferp,  				  unsigned int *nbytesp, @@ -756,53 +726,29 @@ int ath10k_ce_completed_send_next(struct ce_state *ce_state,  void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)  {  	struct 
ath10k_pci *ar_pci = ath10k_pci_priv(ar); -	struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id]; +	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];  	u32 ctrl_addr = ce_state->ctrl_addr; -	void *transfer_context; -	u32 buf; -	unsigned int nbytes; -	unsigned int id; -	unsigned int flags; +	int ret; + +	ret = ath10k_pci_wake(ar); +	if (ret) +		return; -	ath10k_pci_wake(ar);  	spin_lock_bh(&ar_pci->ce_lock);  	/* Clear the copy-complete interrupts that will be handled here. */  	ath10k_ce_engine_int_status_clear(ar, ctrl_addr,  					  HOST_IS_COPY_COMPLETE_MASK); -	if (ce_state->recv_cb) { -		/* -		 * Pop completed recv buffers and call the registered -		 * recv callback for each -		 */ -		while (ath10k_ce_completed_recv_next_nolock(ce_state, -							    &transfer_context, -							    &buf, &nbytes, -							    &id, &flags) == 0) { -			spin_unlock_bh(&ar_pci->ce_lock); -			ce_state->recv_cb(ce_state, transfer_context, buf, -					  nbytes, id, flags); -			spin_lock_bh(&ar_pci->ce_lock); -		} -	} +	spin_unlock_bh(&ar_pci->ce_lock); -	if (ce_state->send_cb) { -		/* -		 * Pop completed send buffers and call the registered -		 * send callback for each -		 */ -		while (ath10k_ce_completed_send_next_nolock(ce_state, -							    &transfer_context, -							    &buf, -							    &nbytes, -							    &id) == 0) { -			spin_unlock_bh(&ar_pci->ce_lock); -			ce_state->send_cb(ce_state, transfer_context, -					  buf, nbytes, id); -			spin_lock_bh(&ar_pci->ce_lock); -		} -	} +	if (ce_state->recv_cb) +		ce_state->recv_cb(ce_state); + +	if (ce_state->send_cb) +		ce_state->send_cb(ce_state); + +	spin_lock_bh(&ar_pci->ce_lock);  	/*  	 * Misc CE interrupts are not being handled, but still need @@ -822,14 +768,16 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)  void ath10k_ce_per_engine_service_any(struct ath10k *ar)  { -	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); -	int ce_id; +	int ce_id, ret;  	u32 intr_summary; -	ath10k_pci_wake(ar); +	ret = ath10k_pci_wake(ar); +	if (ret) +		return; +  	intr_summary = CE_INTERRUPT_SUMMARY(ar); -	for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) { +	for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {  		if (intr_summary & (1 << ce_id))  			intr_summary &= ~(1 << ce_id);  		else @@ -849,13 +797,16 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar)   *   * Called with ce_lock held.   
*/ -static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state, +static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,  						int disable_copy_compl_intr)  {  	u32 ctrl_addr = ce_state->ctrl_addr;  	struct ath10k *ar = ce_state->ar; +	int ret; -	ath10k_pci_wake(ar); +	ret = ath10k_pci_wake(ar); +	if (ret) +		return;  	if ((!disable_copy_compl_intr) &&  	    (ce_state->send_cb || ce_state->recv_cb)) @@ -868,27 +819,29 @@ static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state,  	ath10k_pci_sleep(ar);  } -void ath10k_ce_disable_interrupts(struct ath10k *ar) +int ath10k_ce_disable_interrupts(struct ath10k *ar)  { -	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); -	int ce_id; +	int ce_id, ret; -	ath10k_pci_wake(ar); -	for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) { -		struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id]; -		u32 ctrl_addr = ce_state->ctrl_addr; +	ret = ath10k_pci_wake(ar); +	if (ret) +		return ret; + +	for (ce_id = 0; ce_id < CE_COUNT; ce_id++) { +		u32 ctrl_addr = ath10k_ce_base_address(ce_id);  		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr); +		ath10k_ce_error_intr_disable(ar, ctrl_addr); +		ath10k_ce_watermark_intr_disable(ar, ctrl_addr);  	} +  	ath10k_pci_sleep(ar); + +	return 0;  } -void ath10k_ce_send_cb_register(struct ce_state *ce_state, -				void (*send_cb) (struct ce_state *ce_state, -						 void *transfer_context, -						 u32 buffer, -						 unsigned int nbytes, -						 unsigned int transfer_id), +void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state, +				void (*send_cb)(struct ath10k_ce_pipe *),  				int disable_interrupts)  {  	struct ath10k *ar = ce_state->ar; @@ -900,13 +853,8 @@ void ath10k_ce_send_cb_register(struct ce_state *ce_state,  	spin_unlock_bh(&ar_pci->ce_lock);  } -void ath10k_ce_recv_cb_register(struct ce_state *ce_state, -				void (*recv_cb) (struct ce_state *ce_state, -						 void *transfer_context, -						 u32 buffer, -						 unsigned int nbytes, -						 unsigned int transfer_id, -						 unsigned int flags)) +void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state, +				void (*recv_cb)(struct ath10k_ce_pipe *))  {  	struct ath10k *ar = ce_state->ar;  	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); @@ -919,37 +867,18 @@ void ath10k_ce_recv_cb_register(struct ce_state *ce_state,  static int ath10k_ce_init_src_ring(struct ath10k *ar,  				   unsigned int ce_id, -				   struct ce_state *ce_state,  				   const struct ce_attr *attr)  {  	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); -	struct ce_ring_state *src_ring; -	unsigned int nentries = attr->src_nentries; -	unsigned int ce_nbytes; -	u32 ctrl_addr = ath10k_ce_base_address(ce_id); -	dma_addr_t base_addr; -	char *ptr; - -	nentries = roundup_pow_of_two(nentries); - -	if (ce_state->src_ring) { -		WARN_ON(ce_state->src_ring->nentries != nentries); -		return 0; -	} - -	ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *)); -	ptr = kzalloc(ce_nbytes, GFP_KERNEL); -	if (ptr == NULL) -		return -ENOMEM; +	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; +	struct ath10k_ce_ring *src_ring = ce_state->src_ring; +	u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id); -	ce_state->src_ring = (struct ce_ring_state *)ptr; -	src_ring = ce_state->src_ring; +	nentries = roundup_pow_of_two(attr->src_nentries); -	ptr += sizeof(struct ce_ring_state); -	src_ring->nentries = nentries; -	src_ring->nentries_mask = nentries - 1; +	memset(src_ring->per_transfer_context, 0, +	       nentries * 
sizeof(*src_ring->per_transfer_context)); -	ath10k_pci_wake(ar);  	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);  	src_ring->sw_index &= src_ring->nentries_mask;  	src_ring->hw_index = src_ring->sw_index; @@ -957,19 +886,90 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,  	src_ring->write_index =  		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);  	src_ring->write_index &= src_ring->nentries_mask; -	ath10k_pci_sleep(ar); -	src_ring->per_transfer_context = (void **)ptr; +	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, +					 src_ring->base_addr_ce_space); +	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries); +	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max); +	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0); +	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0); +	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries); + +	ath10k_dbg(ATH10K_DBG_BOOT, +		   "boot init ce src ring id %d entries %d base_addr %p\n", +		   ce_id, nentries, src_ring->base_addr_owner_space); + +	return 0; +} + +static int ath10k_ce_init_dest_ring(struct ath10k *ar, +				    unsigned int ce_id, +				    const struct ce_attr *attr) +{ +	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); +	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; +	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring; +	u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id); + +	nentries = roundup_pow_of_two(attr->dest_nentries); + +	memset(dest_ring->per_transfer_context, 0, +	       nentries * sizeof(*dest_ring->per_transfer_context)); + +	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr); +	dest_ring->sw_index &= dest_ring->nentries_mask; +	dest_ring->write_index = +		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr); +	dest_ring->write_index &= dest_ring->nentries_mask; + +	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, +					  dest_ring->base_addr_ce_space); +	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries); +	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0); +	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0); +	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries); + +	ath10k_dbg(ATH10K_DBG_BOOT, +		   "boot ce dest ring id %d entries %d base_addr %p\n", +		   ce_id, nentries, dest_ring->base_addr_owner_space); + +	return 0; +} + +static struct ath10k_ce_ring * +ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id, +			 const struct ce_attr *attr) +{ +	struct ath10k_ce_ring *src_ring; +	u32 nentries = attr->src_nentries; +	dma_addr_t base_addr; + +	nentries = roundup_pow_of_two(nentries); + +	src_ring = kzalloc(sizeof(*src_ring) + +			   (nentries * +			    sizeof(*src_ring->per_transfer_context)), +			   GFP_KERNEL); +	if (src_ring == NULL) +		return ERR_PTR(-ENOMEM); + +	src_ring->nentries = nentries; +	src_ring->nentries_mask = nentries - 1;  	/*  	 * Legacy platforms that do not support cache  	 * coherent DMA are unsupported  	 */  	src_ring->base_addr_owner_space_unaligned = -		pci_alloc_consistent(ar_pci->pdev, -				     (nentries * sizeof(struct ce_desc) + -				      CE_DESC_RING_ALIGN), -				     &base_addr); +		dma_alloc_coherent(ar->dev, +				   (nentries * sizeof(struct ce_desc) + +				    CE_DESC_RING_ALIGN), +				   &base_addr, GFP_KERNEL); +	if (!src_ring->base_addr_owner_space_unaligned) { +		kfree(src_ring); +		return ERR_PTR(-ENOMEM); +	} +  	src_ring->base_addr_ce_space_unaligned = base_addr;  	src_ring->base_addr_owner_space = PTR_ALIGN( @@ -986,75 +986,57 @@ static int ath10k_ce_init_src_ring(struct 
ath10k *ar,  	src_ring->shadow_base_unaligned =  		kmalloc((nentries * sizeof(struct ce_desc) +  			 CE_DESC_RING_ALIGN), GFP_KERNEL); +	if (!src_ring->shadow_base_unaligned) { +		dma_free_coherent(ar->dev, +				  (nentries * sizeof(struct ce_desc) + +				   CE_DESC_RING_ALIGN), +				  src_ring->base_addr_owner_space, +				  src_ring->base_addr_ce_space); +		kfree(src_ring); +		return ERR_PTR(-ENOMEM); +	}  	src_ring->shadow_base = PTR_ALIGN(  			src_ring->shadow_base_unaligned,  			CE_DESC_RING_ALIGN); -	ath10k_pci_wake(ar); -	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, -					 src_ring->base_addr_ce_space); -	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries); -	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max); -	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0); -	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0); -	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries); -	ath10k_pci_sleep(ar); - -	return 0; +	return src_ring;  } -static int ath10k_ce_init_dest_ring(struct ath10k *ar, -				    unsigned int ce_id, -				    struct ce_state *ce_state, -				    const struct ce_attr *attr) +static struct ath10k_ce_ring * +ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id, +			  const struct ce_attr *attr)  { -	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); -	struct ce_ring_state *dest_ring; -	unsigned int nentries = attr->dest_nentries; -	unsigned int ce_nbytes; -	u32 ctrl_addr = ath10k_ce_base_address(ce_id); +	struct ath10k_ce_ring *dest_ring; +	u32 nentries;  	dma_addr_t base_addr; -	char *ptr; - -	nentries = roundup_pow_of_two(nentries); - -	if (ce_state->dest_ring) { -		WARN_ON(ce_state->dest_ring->nentries != nentries); -		return 0; -	} -	ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *)); -	ptr = kzalloc(ce_nbytes, GFP_KERNEL); -	if (ptr == NULL) -		return -ENOMEM; +	nentries = roundup_pow_of_two(attr->dest_nentries); -	ce_state->dest_ring = (struct ce_ring_state *)ptr; -	dest_ring = ce_state->dest_ring; +	dest_ring = kzalloc(sizeof(*dest_ring) + +			    (nentries * +			     sizeof(*dest_ring->per_transfer_context)), +			    GFP_KERNEL); +	if (dest_ring == NULL) +		return ERR_PTR(-ENOMEM); -	ptr += sizeof(struct ce_ring_state);  	dest_ring->nentries = nentries;  	dest_ring->nentries_mask = nentries - 1; -	ath10k_pci_wake(ar); -	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr); -	dest_ring->sw_index &= dest_ring->nentries_mask; -	dest_ring->write_index = -		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr); -	dest_ring->write_index &= dest_ring->nentries_mask; -	ath10k_pci_sleep(ar); - -	dest_ring->per_transfer_context = (void **)ptr; -  	/*  	 * Legacy platforms that do not support cache  	 * coherent DMA are unsupported  	 */  	dest_ring->base_addr_owner_space_unaligned = -		pci_alloc_consistent(ar_pci->pdev, -				     (nentries * sizeof(struct ce_desc) + -				      CE_DESC_RING_ALIGN), -				     &base_addr); +		dma_alloc_coherent(ar->dev, +				   (nentries * sizeof(struct ce_desc) + +				    CE_DESC_RING_ALIGN), +				   &base_addr, GFP_KERNEL); +	if (!dest_ring->base_addr_owner_space_unaligned) { +		kfree(dest_ring); +		return ERR_PTR(-ENOMEM); +	} +  	dest_ring->base_addr_ce_space_unaligned = base_addr;  	/* @@ -1071,124 +1053,161 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,  			dest_ring->base_addr_ce_space_unaligned,  			CE_DESC_RING_ALIGN); -	ath10k_pci_wake(ar); -	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, -					  dest_ring->base_addr_ce_space); -	ath10k_ce_dest_ring_size_set(ar, 
ctrl_addr, nentries); -	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0); -	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0); -	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries); -	ath10k_pci_sleep(ar); - -	return 0; +	return dest_ring;  } -static struct ce_state *ath10k_ce_init_state(struct ath10k *ar, -					     unsigned int ce_id, -					     const struct ce_attr *attr) +/* + * Initialize a Copy Engine based on caller-supplied attributes. + * This may be called once to initialize both source and destination + * rings or it may be called twice for separate source and destination + * initialization. It may be that only one side or the other is + * initialized by software/firmware. + */ +int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id, +			const struct ce_attr *attr)  {  	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); -	struct ce_state *ce_state = NULL; -	u32 ctrl_addr = ath10k_ce_base_address(ce_id); +	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; +	int ret; + +	/* +	 * Make sure there's enough CE ringbuffer entries for HTT TX to avoid +	 * additional TX locking checks. +	 * +	 * For the lack of a better place do the check here. +	 */ +	BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC > +		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); +	BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC > +		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); + +	ret = ath10k_pci_wake(ar); +	if (ret) +		return ret;  	spin_lock_bh(&ar_pci->ce_lock); +	ce_state->ar = ar; +	ce_state->id = ce_id; +	ce_state->ctrl_addr = ath10k_ce_base_address(ce_id); +	ce_state->attr_flags = attr->flags; +	ce_state->src_sz_max = attr->src_sz_max; +	spin_unlock_bh(&ar_pci->ce_lock); -	if (!ar_pci->ce_id_to_state[ce_id]) { -		ce_state = kzalloc(sizeof(*ce_state), GFP_ATOMIC); -		if (ce_state == NULL) { -			spin_unlock_bh(&ar_pci->ce_lock); -			return NULL; +	if (attr->src_nentries) { +		ret = ath10k_ce_init_src_ring(ar, ce_id, attr); +		if (ret) { +			ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n", +				   ce_id, ret); +			goto out;  		} +	} -		ar_pci->ce_id_to_state[ce_id] = ce_state; -		ce_state->ar = ar; -		ce_state->id = ce_id; -		ce_state->ctrl_addr = ctrl_addr; -		ce_state->state = CE_RUNNING; -		/* Save attribute flags */ -		ce_state->attr_flags = attr->flags; -		ce_state->src_sz_max = attr->src_sz_max; +	if (attr->dest_nentries) { +		ret = ath10k_ce_init_dest_ring(ar, ce_id, attr); +		if (ret) { +			ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n", +				   ce_id, ret); +			goto out; +		}  	} -	spin_unlock_bh(&ar_pci->ce_lock); +out: +	ath10k_pci_sleep(ar); +	return ret; +} + +static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id) +{ +	u32 ctrl_addr = ath10k_ce_base_address(ce_id); -	return ce_state; +	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0); +	ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0); +	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0); +	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);  } -/* - * Initialize a Copy Engine based on caller-supplied attributes. - * This may be called once to initialize both source and destination - * rings or it may be called twice for separate source and destination - * initialization. It may be that only one side or the other is - * initialized by software/firmware. 
- */ -struct ce_state *ath10k_ce_init(struct ath10k *ar, -				unsigned int ce_id, -				const struct ce_attr *attr) +static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)  { -	struct ce_state *ce_state;  	u32 ctrl_addr = ath10k_ce_base_address(ce_id); -	ce_state = ath10k_ce_init_state(ar, ce_id, attr); -	if (!ce_state) { -		ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id); -		return NULL; -	} +	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0); +	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0); +	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0); +} + +void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id) +{ +	int ret; + +	ret = ath10k_pci_wake(ar); +	if (ret) +		return; + +	ath10k_ce_deinit_src_ring(ar, ce_id); +	ath10k_ce_deinit_dest_ring(ar, ce_id); + +	ath10k_pci_sleep(ar); +} + +int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, +			 const struct ce_attr *attr) +{ +	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); +	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; +	int ret;  	if (attr->src_nentries) { -		if (ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr)) { -			ath10k_err("Failed to initialize CE src ring for ID: %d\n", -				   ce_id); -			ath10k_ce_deinit(ce_state); -			return NULL; +		ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr); +		if (IS_ERR(ce_state->src_ring)) { +			ret = PTR_ERR(ce_state->src_ring); +			ath10k_err("failed to allocate copy engine source ring %d: %d\n", +				   ce_id, ret); +			ce_state->src_ring = NULL; +			return ret;  		}  	}  	if (attr->dest_nentries) { -		if (ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr)) { -			ath10k_err("Failed to initialize CE dest ring for ID: %d\n", -				   ce_id); -			ath10k_ce_deinit(ce_state); -			return NULL; +		ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id, +								attr); +		if (IS_ERR(ce_state->dest_ring)) { +			ret = PTR_ERR(ce_state->dest_ring); +			ath10k_err("failed to allocate copy engine destination ring %d: %d\n", +				   ce_id, ret); +			ce_state->dest_ring = NULL; +			return ret;  		}  	} -	/* Enable CE error interrupts */ -	ath10k_pci_wake(ar); -	ath10k_ce_error_intr_enable(ar, ctrl_addr); -	ath10k_pci_sleep(ar); - -	return ce_state; +	return 0;  } -void ath10k_ce_deinit(struct ce_state *ce_state) +void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)  { -	unsigned int ce_id = ce_state->id; -	struct ath10k *ar = ce_state->ar;  	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - -	ce_state->state = CE_UNUSED; -	ar_pci->ce_id_to_state[ce_id] = NULL; +	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];  	if (ce_state->src_ring) {  		kfree(ce_state->src_ring->shadow_base_unaligned); -		pci_free_consistent(ar_pci->pdev, -				    (ce_state->src_ring->nentries * -				     sizeof(struct ce_desc) + -				     CE_DESC_RING_ALIGN), -				    ce_state->src_ring->base_addr_owner_space, -				    ce_state->src_ring->base_addr_ce_space); +		dma_free_coherent(ar->dev, +				  (ce_state->src_ring->nentries * +				   sizeof(struct ce_desc) + +				   CE_DESC_RING_ALIGN), +				  ce_state->src_ring->base_addr_owner_space, +				  ce_state->src_ring->base_addr_ce_space);  		kfree(ce_state->src_ring);  	}  	if (ce_state->dest_ring) { -		pci_free_consistent(ar_pci->pdev, -				    (ce_state->dest_ring->nentries * -				     sizeof(struct ce_desc) + -				     CE_DESC_RING_ALIGN), -				    ce_state->dest_ring->base_addr_owner_space, -				    ce_state->dest_ring->base_addr_ce_space); +		dma_free_coherent(ar->dev, +				  
(ce_state->dest_ring->nentries * +				   sizeof(struct ce_desc) + +				   CE_DESC_RING_ALIGN), +				  ce_state->dest_ring->base_addr_owner_space, +				  ce_state->dest_ring->base_addr_ce_space);  		kfree(ce_state->dest_ring);  	} -	kfree(ce_state); + +	ce_state->src_ring = NULL; +	ce_state->dest_ring = NULL;  } diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index c17f07c026f..7a5a36fc59c 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -23,11 +23,10 @@  /* Maximum number of Copy Engine's supported */  #define CE_COUNT_MAX 8 -#define CE_HTT_H2T_MSG_SRC_NENTRIES 2048 +#define CE_HTT_H2T_MSG_SRC_NENTRIES 4096  /* Descriptor rings must be aligned to this boundary */  #define CE_DESC_RING_ALIGN	8 -#define CE_SENDLIST_ITEMS_MAX	12  #define CE_SEND_FLAG_GATHER	0x00010000  /* @@ -36,16 +35,9 @@   * how to use copy engines.   */ -struct ce_state; +struct ath10k_ce_pipe; -/* Copy Engine operational state */ -enum ce_op_state { -	CE_UNUSED, -	CE_PAUSED, -	CE_RUNNING, -}; -  #define CE_DESC_FLAGS_GATHER         (1 << 0)  #define CE_DESC_FLAGS_BYTE_SWAP      (1 << 1)  #define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC @@ -57,8 +49,7 @@ struct ce_desc {  	__le16 flags; /* %CE_DESC_FLAGS_ */  }; -/* Copy Engine Ring internal state */ -struct ce_ring_state { +struct ath10k_ce_ring {  	/* Number of entries in this ring; must be power of 2 */  	unsigned int nentries;  	unsigned int nentries_mask; @@ -113,52 +104,24 @@ struct ce_ring_state {  	void *shadow_base_unaligned;  	struct ce_desc *shadow_base; -	void **per_transfer_context; +	/* keep last */ +	void *per_transfer_context[0];  }; -/* Copy Engine internal state */ -struct ce_state { +struct ath10k_ce_pipe {  	struct ath10k *ar;  	unsigned int id;  	unsigned int attr_flags;  	u32 ctrl_addr; -	enum ce_op_state state; - -	void (*send_cb) (struct ce_state *ce_state, -			 void *per_transfer_send_context, -			 u32 buffer, -			 unsigned int nbytes, -			 unsigned int transfer_id); -	void (*recv_cb) (struct ce_state *ce_state, -			 void *per_transfer_recv_context, -			 u32 buffer, -			 unsigned int nbytes, -			 unsigned int transfer_id, -			 unsigned int flags); - -	unsigned int src_sz_max; -	struct ce_ring_state *src_ring; -	struct ce_ring_state *dest_ring; -}; -struct ce_sendlist_item { -	/* e.g. 
buffer or desc list */ -	dma_addr_t data; -	union { -		/* simple buffer */ -		unsigned int nbytes; -		/* Rx descriptor list */ -		unsigned int ndesc; -	} u; -	/* externally-specified flags; OR-ed with internal flags */ -	u32 flags; -}; +	void (*send_cb)(struct ath10k_ce_pipe *); +	void (*recv_cb)(struct ath10k_ce_pipe *); -struct ce_sendlist { -	unsigned int num_items; -	struct ce_sendlist_item item[CE_SENDLIST_ITEMS_MAX]; +	unsigned int src_sz_max; +	struct ath10k_ce_ring *src_ring; +	struct ath10k_ce_ring *dest_ring;  };  /* Copy Engine settable attributes */ @@ -182,7 +145,7 @@ struct ce_attr;   *   * Implementation note: pushes 1 buffer to Source ring   */ -int ath10k_ce_send(struct ce_state *ce_state, +int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,  		   void *per_transfer_send_context,  		   u32 buffer,  		   unsigned int nbytes, @@ -190,36 +153,20 @@ int ath10k_ce_send(struct ce_state *ce_state,  		   unsigned int transfer_id,  		   unsigned int flags); -void ath10k_ce_send_cb_register(struct ce_state *ce_state, -				void (*send_cb) (struct ce_state *ce_state, -						 void *transfer_context, -						 u32 buffer, -						 unsigned int nbytes, -						 unsigned int transfer_id), -				int disable_interrupts); +int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, +			  void *per_transfer_context, +			  u32 buffer, +			  unsigned int nbytes, +			  unsigned int transfer_id, +			  unsigned int flags); -/* Append a simple buffer (address/length) to a sendlist. */ -void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, -				u32 buffer, -				unsigned int nbytes, -				/* OR-ed with internal flags */ -				u32 flags); +void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe); -/* - * Queue a "sendlist" of buffers to be sent using gather to a single - * anonymous destination buffer - *   ce         - which copy engine to use - *   sendlist        - list of simple buffers to send using gather - *   transfer_id     - arbitrary ID; reflected to destination - * Returns 0 on success; otherwise an error status. - * - * Implemenation note: Pushes multiple buffers with Gather to Source ring. - */ -int ath10k_ce_sendlist_send(struct ce_state *ce_state, -			    void *per_transfer_send_context, -			    struct ce_sendlist *sendlist, -			    /* 14 bits */ -			    unsigned int transfer_id); +void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state, +				void (*send_cb)(struct ath10k_ce_pipe *), +				int disable_interrupts); + +int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);  /*==================Recv=======================*/ @@ -233,17 +180,12 @@ int ath10k_ce_sendlist_send(struct ce_state *ce_state,   *   * Implemenation note: Pushes a buffer to Dest ring.   */ -int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state, +int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,  			       void *per_transfer_recv_context,  			       u32 buffer); -void ath10k_ce_recv_cb_register(struct ce_state *ce_state, -				void (*recv_cb) (struct ce_state *ce_state, -						 void *transfer_context, -						 u32 buffer, -						 unsigned int nbytes, -						 unsigned int transfer_id, -						 unsigned int flags)); +void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state, +				void (*recv_cb)(struct ath10k_ce_pipe *));  /* recv flags */  /* Data is byte-swapped */ @@ -253,7 +195,7 @@ void ath10k_ce_recv_cb_register(struct ce_state *ce_state,   * Supply data for the next completed unprocessed receive descriptor.   * Pops buffer from Dest ring.   
*/ -int ath10k_ce_completed_recv_next(struct ce_state *ce_state, +int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,  				  void **per_transfer_contextp,  				  u32 *bufferp,  				  unsigned int *nbytesp, @@ -263,7 +205,7 @@ int ath10k_ce_completed_recv_next(struct ce_state *ce_state,   * Supply data for the next completed unprocessed send descriptor.   * Pops 1 completed send buffer from Source ring.   */ -int ath10k_ce_completed_send_next(struct ce_state *ce_state, +int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,  			   void **per_transfer_contextp,  			   u32 *bufferp,  			   unsigned int *nbytesp, @@ -271,10 +213,12 @@ int ath10k_ce_completed_send_next(struct ce_state *ce_state,  /*==================CE Engine Initialization=======================*/ -/* Initialize an instance of a CE */ -struct ce_state *ath10k_ce_init(struct ath10k *ar, -				unsigned int ce_id, -				const struct ce_attr *attr); +int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id, +			const struct ce_attr *attr); +void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id); +int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, +			  const struct ce_attr *attr); +void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);  /*==================CE Engine Shutdown=======================*/  /* @@ -282,7 +226,7 @@ struct ce_state *ath10k_ce_init(struct ath10k *ar,   * receive buffers.  Target DMA must be stopped before using   * this API.   */ -int ath10k_ce_revoke_recv_next(struct ce_state *ce_state, +int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,  			       void **per_transfer_contextp,  			       u32 *bufferp); @@ -291,18 +235,16 @@ int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,   * pending sends.  Target DMA must be stopped before using   * this API.   */ -int ath10k_ce_cancel_send_next(struct ce_state *ce_state, +int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,  			       void **per_transfer_contextp,  			       u32 *bufferp,  			       unsigned int *nbytesp,  			       unsigned int *transfer_idp); -void ath10k_ce_deinit(struct ce_state *ce_state); -  /*==================CE Interrupt Handlers====================*/  void ath10k_ce_per_engine_service_any(struct ath10k *ar);  void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id); -void ath10k_ce_disable_interrupts(struct ath10k *ar); +int ath10k_ce_disable_interrupts(struct ath10k *ar);  /* ce_attr.flags values */  /* Use NonSnooping PCIe accesses? */ @@ -322,9 +264,6 @@ struct ce_attr {  	/* CE_ATTR_* values */  	unsigned int flags; -	/* currently not in use */ -	unsigned int priority; -  	/* #entries in source ring - Must be a power of 2 */  	unsigned int src_nentries; @@ -336,21 +275,8 @@ struct ce_attr {  	/* #entries in destination ring - Must be a power of 2 */  	unsigned int dest_nentries; - -	/* Future use */ -	void *reserved;  }; -/* - * When using sendlist_send to transfer multiple buffer fragments, the - * transfer context of each fragment, except last one, will be filled - * with CE_SENDLIST_ITEM_CTXT. ce_completed_send will return success for - * each fragment done with send and the transfer context would be - * CE_SENDLIST_ITEM_CTXT. Upper layer could use this to identify the - * status of a send completion. 
- */ -#define CE_SENDLIST_ITEM_CTXT	((void *)0xcecebeef) -  #define SR_BA_ADDRESS		0x0000  #define SR_SIZE_ADDRESS		0x0004  #define DR_BA_ADDRESS		0x0008 diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 7226c23b956..e6c56c5bb0f 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -39,17 +39,6 @@ MODULE_PARM_DESC(p2p, "Enable ath10k P2P support");  static const struct ath10k_hw_params ath10k_hw_params_list[] = {  	{ -		.id = QCA988X_HW_1_0_VERSION, -		.name = "qca988x hw1.0", -		.patch_load_addr = QCA988X_HW_1_0_PATCH_LOAD_ADDR, -		.fw = { -			.dir = QCA988X_HW_1_0_FW_DIR, -			.fw = QCA988X_HW_1_0_FW_FILE, -			.otp = QCA988X_HW_1_0_OTP_FILE, -			.board = QCA988X_HW_1_0_BOARD_DATA_FILE, -		}, -	}, -	{  		.id = QCA988X_HW_2_0_VERSION,  		.name = "qca988x hw2.0",  		.patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR, @@ -64,61 +53,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {  static void ath10k_send_suspend_complete(struct ath10k *ar)  { -	ath10k_dbg(ATH10K_DBG_CORE, "%s\n", __func__); - -	ar->is_target_paused = true; -	wake_up(&ar->event_queue); -} - -static int ath10k_check_fw_version(struct ath10k *ar) -{ -	char version[32]; - -	if (ar->fw_version_major >= SUPPORTED_FW_MAJOR && -	    ar->fw_version_minor >= SUPPORTED_FW_MINOR && -	    ar->fw_version_release >= SUPPORTED_FW_RELEASE && -	    ar->fw_version_build >= SUPPORTED_FW_BUILD) -		return 0; - -	snprintf(version, sizeof(version), "%u.%u.%u.%u", -		 SUPPORTED_FW_MAJOR, SUPPORTED_FW_MINOR, -		 SUPPORTED_FW_RELEASE, SUPPORTED_FW_BUILD); - -	ath10k_warn("WARNING: Firmware version %s is not officially supported.\n", -		    ar->hw->wiphy->fw_version); -	ath10k_warn("Please upgrade to version %s (or newer)\n", version); - -	return 0; -} - -static int ath10k_init_connect_htc(struct ath10k *ar) -{ -	int status; - -	status = ath10k_wmi_connect_htc_service(ar); -	if (status) -		goto conn_fail; - -	/* Start HTC */ -	status = ath10k_htc_start(&ar->htc); -	if (status) -		goto conn_fail; +	ath10k_dbg(ATH10K_DBG_BOOT, "boot suspend complete\n"); -	/* Wait for WMI event to be ready */ -	status = ath10k_wmi_wait_for_service_ready(ar); -	if (status <= 0) { -		ath10k_warn("wmi service ready event not received"); -		status = -ETIMEDOUT; -		goto timeout; -	} - -	ath10k_dbg(ATH10K_DBG_CORE, "core wmi ready\n"); -	return 0; - -timeout: -	ath10k_htc_stop(&ar->htc); -conn_fail: -	return status; +	complete(&ar->target_suspend);  }  static int ath10k_init_configure_target(struct ath10k *ar) @@ -200,8 +137,7 @@ static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar,  	return fw;  } -static int ath10k_push_board_ext_data(struct ath10k *ar, -				      const struct firmware *fw) +static int ath10k_push_board_ext_data(struct ath10k *ar)  {  	u32 board_data_size = QCA988X_BOARD_DATA_SZ;  	u32 board_ext_data_size = QCA988X_BOARD_EXT_DATA_SZ; @@ -214,21 +150,21 @@ static int ath10k_push_board_ext_data(struct ath10k *ar,  		return ret;  	} -	ath10k_dbg(ATH10K_DBG_CORE, -		   "ath10k: Board extended Data download addr: 0x%x\n", +	ath10k_dbg(ATH10K_DBG_BOOT, +		   "boot push board extended data addr 0x%x\n",  		   board_ext_data_addr);  	if (board_ext_data_addr == 0)  		return 0; -	if (fw->size != (board_data_size + board_ext_data_size)) { +	if (ar->board_len != (board_data_size + board_ext_data_size)) {  		ath10k_err("invalid board (ext) data sizes %zu != %d+%d\n", -			   fw->size, board_data_size, board_ext_data_size); +			   ar->board_len, 
board_data_size, board_ext_data_size);  		return -EINVAL;  	}  	ret = ath10k_bmi_write_memory(ar, board_ext_data_addr, -				      fw->data + board_data_size, +				      ar->board_data + board_data_size,  				      board_ext_data_size);  	if (ret) {  		ath10k_err("could not write board ext data (%d)\n", ret); @@ -247,12 +183,11 @@ static int ath10k_push_board_ext_data(struct ath10k *ar,  static int ath10k_download_board_data(struct ath10k *ar)  { -	const struct firmware *fw = ar->board_data;  	u32 board_data_size = QCA988X_BOARD_DATA_SZ;  	u32 address;  	int ret; -	ret = ath10k_push_board_ext_data(ar, fw); +	ret = ath10k_push_board_ext_data(ar);  	if (ret) {  		ath10k_err("could not push board ext data (%d)\n", ret);  		goto exit; @@ -264,8 +199,9 @@ static int ath10k_download_board_data(struct ath10k *ar)  		goto exit;  	} -	ret = ath10k_bmi_write_memory(ar, address, fw->data, -				      min_t(u32, board_data_size, fw->size)); +	ret = ath10k_bmi_write_memory(ar, address, ar->board_data, +				      min_t(u32, board_data_size, +					    ar->board_len));  	if (ret) {  		ath10k_err("could not write board data (%d)\n", ret);  		goto exit; @@ -283,42 +219,51 @@ exit:  static int ath10k_download_and_run_otp(struct ath10k *ar)  { -	const struct firmware *fw = ar->otp; -	u32 address = ar->hw_params.patch_load_addr; -	u32 exec_param; +	u32 result, address = ar->hw_params.patch_load_addr;  	int ret;  	/* OTP is optional */ -	if (!ar->otp) +	if (!ar->otp_data || !ar->otp_len) { +		ath10k_warn("Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n", +			    ar->otp_data, ar->otp_len);  		return 0; +	} + +	ath10k_dbg(ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n", +		   address, ar->otp_len); -	ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size); +	ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);  	if (ret) {  		ath10k_err("could not write otp (%d)\n", ret); -		goto exit; +		return ret;  	} -	exec_param = 0; -	ret = ath10k_bmi_execute(ar, address, &exec_param); +	ret = ath10k_bmi_execute(ar, address, 0, &result);  	if (ret) {  		ath10k_err("could not execute otp (%d)\n", ret); -		goto exit; +		return ret;  	} -exit: -	return ret; +	ath10k_dbg(ATH10K_DBG_BOOT, "boot otp execute result %d\n", result); + +	if (result != 0) { +		ath10k_err("otp calibration failed: %d", result); +		return -EINVAL; +	} + +	return 0;  }  static int ath10k_download_fw(struct ath10k *ar)  { -	const struct firmware *fw = ar->firmware;  	u32 address;  	int ret;  	address = ar->hw_params.patch_load_addr; -	ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size); +	ret = ath10k_bmi_fast_download(ar, address, ar->firmware_data, +				       ar->firmware_len);  	if (ret) {  		ath10k_err("could not write fw (%d)\n", ret);  		goto exit; @@ -330,8 +275,8 @@ exit:  static void ath10k_core_free_firmware_files(struct ath10k *ar)  { -	if (ar->board_data && !IS_ERR(ar->board_data)) -		release_firmware(ar->board_data); +	if (ar->board && !IS_ERR(ar->board)) +		release_firmware(ar->board);  	if (ar->otp && !IS_ERR(ar->otp))  		release_firmware(ar->otp); @@ -339,12 +284,20 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)  	if (ar->firmware && !IS_ERR(ar->firmware))  		release_firmware(ar->firmware); +	ar->board = NULL;  	ar->board_data = NULL; +	ar->board_len = 0; +  	ar->otp = NULL; +	ar->otp_data = NULL; +	ar->otp_len = 0; +  	ar->firmware = NULL; +	ar->firmware_data = NULL; +	ar->firmware_len = 0;  } -static int ath10k_core_fetch_firmware_files(struct 
ath10k *ar) +static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)  {  	int ret = 0; @@ -358,15 +311,18 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)  		return -EINVAL;  	} -	ar->board_data = ath10k_fetch_fw_file(ar, -					      ar->hw_params.fw.dir, -					      ar->hw_params.fw.board); -	if (IS_ERR(ar->board_data)) { -		ret = PTR_ERR(ar->board_data); +	ar->board = ath10k_fetch_fw_file(ar, +					 ar->hw_params.fw.dir, +					 ar->hw_params.fw.board); +	if (IS_ERR(ar->board)) { +		ret = PTR_ERR(ar->board);  		ath10k_err("could not fetch board data (%d)\n", ret);  		goto err;  	} +	ar->board_data = ar->board->data; +	ar->board_len = ar->board->size; +  	ar->firmware = ath10k_fetch_fw_file(ar,  					    ar->hw_params.fw.dir,  					    ar->hw_params.fw.fw); @@ -376,6 +332,9 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)  		goto err;  	} +	ar->firmware_data = ar->firmware->data; +	ar->firmware_len = ar->firmware->size; +  	/* OTP may be undefined. If so, don't fetch it at all */  	if (ar->hw_params.fw.otp == NULL)  		return 0; @@ -389,6 +348,9 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)  		goto err;  	} +	ar->otp_data = ar->otp->data; +	ar->otp_len = ar->otp->size; +  	return 0;  err: @@ -396,21 +358,220 @@ err:  	return ret;  } +static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) +{ +	size_t magic_len, len, ie_len; +	int ie_id, i, index, bit, ret; +	struct ath10k_fw_ie *hdr; +	const u8 *data; +	__le32 *timestamp; + +	/* first fetch the firmware file (firmware-*.bin) */ +	ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name); +	if (IS_ERR(ar->firmware)) { +		ath10k_err("could not fetch firmware file '%s/%s': %ld\n", +			   ar->hw_params.fw.dir, name, PTR_ERR(ar->firmware)); +		return PTR_ERR(ar->firmware); +	} + +	data = ar->firmware->data; +	len = ar->firmware->size; + +	/* magic also includes the null byte, check that as well */ +	magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1; + +	if (len < magic_len) { +		ath10k_err("firmware file '%s/%s' too small to contain magic: %zu\n", +			   ar->hw_params.fw.dir, name, len); +		ret = -EINVAL; +		goto err; +	} + +	if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) { +		ath10k_err("invalid firmware magic\n"); +		ret = -EINVAL; +		goto err; +	} + +	/* jump over the padding */ +	magic_len = ALIGN(magic_len, 4); + +	len -= magic_len; +	data += magic_len; + +	/* loop elements */ +	while (len > sizeof(struct ath10k_fw_ie)) { +		hdr = (struct ath10k_fw_ie *)data; + +		ie_id = le32_to_cpu(hdr->id); +		ie_len = le32_to_cpu(hdr->len); + +		len -= sizeof(*hdr); +		data += sizeof(*hdr); + +		if (len < ie_len) { +			ath10k_err("invalid length for FW IE %d (%zu < %zu)\n", +				   ie_id, len, ie_len); +			ret = -EINVAL; +			goto err; +		} + +		switch (ie_id) { +		case ATH10K_FW_IE_FW_VERSION: +			if (ie_len > sizeof(ar->hw->wiphy->fw_version) - 1) +				break; + +			memcpy(ar->hw->wiphy->fw_version, data, ie_len); +			ar->hw->wiphy->fw_version[ie_len] = '\0'; + +			ath10k_dbg(ATH10K_DBG_BOOT, +				   "found fw version %s\n", +				    ar->hw->wiphy->fw_version); +			break; +		case ATH10K_FW_IE_TIMESTAMP: +			if (ie_len != sizeof(u32)) +				break; + +			timestamp = (__le32 *)data; + +			ath10k_dbg(ATH10K_DBG_BOOT, "found fw timestamp %d\n", +				   le32_to_cpup(timestamp)); +			break; +		case ATH10K_FW_IE_FEATURES: +			ath10k_dbg(ATH10K_DBG_BOOT, +				   "found firmware features ie (%zd B)\n", +				   ie_len); + +			for (i = 0; i < 
ATH10K_FW_FEATURE_COUNT; i++) { +				index = i / 8; +				bit = i % 8; + +				if (index == ie_len) +					break; + +				if (data[index] & (1 << bit)) { +					ath10k_dbg(ATH10K_DBG_BOOT, +						   "Enabling feature bit: %i\n", +						   i); +					__set_bit(i, ar->fw_features); +				} +			} + +			ath10k_dbg_dump(ATH10K_DBG_BOOT, "features", "", +					ar->fw_features, +					sizeof(ar->fw_features)); +			break; +		case ATH10K_FW_IE_FW_IMAGE: +			ath10k_dbg(ATH10K_DBG_BOOT, +				   "found fw image ie (%zd B)\n", +				   ie_len); + +			ar->firmware_data = data; +			ar->firmware_len = ie_len; + +			break; +		case ATH10K_FW_IE_OTP_IMAGE: +			ath10k_dbg(ATH10K_DBG_BOOT, +				   "found otp image ie (%zd B)\n", +				   ie_len); + +			ar->otp_data = data; +			ar->otp_len = ie_len; + +			break; +		default: +			ath10k_warn("Unknown FW IE: %u\n", +				    le32_to_cpu(hdr->id)); +			break; +		} + +		/* jump over the padding */ +		ie_len = ALIGN(ie_len, 4); + +		len -= ie_len; +		data += ie_len; +	} + +	if (!ar->firmware_data || !ar->firmware_len) { +		ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n", +			    ar->hw_params.fw.dir, name); +		ret = -ENOMEDIUM; +		goto err; +	} + +	/* now fetch the board file */ +	if (ar->hw_params.fw.board == NULL) { +		ath10k_err("board data file not defined"); +		ret = -EINVAL; +		goto err; +	} + +	ar->board = ath10k_fetch_fw_file(ar, +					 ar->hw_params.fw.dir, +					 ar->hw_params.fw.board); +	if (IS_ERR(ar->board)) { +		ret = PTR_ERR(ar->board); +		ath10k_err("could not fetch board data '%s/%s' (%d)\n", +			   ar->hw_params.fw.dir, ar->hw_params.fw.board, +			   ret); +		goto err; +	} + +	ar->board_data = ar->board->data; +	ar->board_len = ar->board->size; + +	return 0; + +err: +	ath10k_core_free_firmware_files(ar); +	return ret; +} + +static int ath10k_core_fetch_firmware_files(struct ath10k *ar) +{ +	int ret; + +	ar->fw_api = 2; +	ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); + +	ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE); +	if (ret == 0) +		goto success; + +	ar->fw_api = 1; +	ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); + +	ret = ath10k_core_fetch_firmware_api_1(ar); +	if (ret) +		return ret; + +success: +	ath10k_dbg(ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api); + +	return 0; +} +  static int ath10k_init_download_firmware(struct ath10k *ar)  {  	int ret;  	ret = ath10k_download_board_data(ar); -	if (ret) +	if (ret) { +		ath10k_err("failed to download board data: %d\n", ret);  		return ret; +	}  	ret = ath10k_download_and_run_otp(ar); -	if (ret) +	if (ret) { +		ath10k_err("failed to run otp: %d\n", ret);  		return ret; +	}  	ret = ath10k_download_fw(ar); -	if (ret) +	if (ret) { +		ath10k_err("failed to download firmware: %d\n", ret);  		return ret; +	}  	return ret;  } @@ -429,10 +590,8 @@ static int ath10k_init_uart(struct ath10k *ar)  		return ret;  	} -	if (!uart_print) { -		ath10k_info("UART prints disabled\n"); +	if (!uart_print)  		return 0; -	}  	ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, 7);  	if (ret) { @@ -446,6 +605,13 @@ static int ath10k_init_uart(struct ath10k *ar)  		return ret;  	} +	/* Set the UART baud rate to 19200. 
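
The IE walk above fixes the on-disk layout of the new API-2 firmware file: a NUL-terminated ATH10K_FIRMWARE_MAGIC string padded to a 4-byte boundary, followed by (id, len, payload) elements whose payloads are padded the same way. Below is a minimal user-space sketch of that container; the two little-endian u32 header fields are implied by the le32_to_cpu() calls in the loop above, while the magic value and the standalone helpers are assumptions for illustration only.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define FW_MAGIC "QCA-ATH10K"	/* assumed value of ATH10K_FIRMWARE_MAGIC */
	#define ALIGN4(x) (((x) + 3u) & ~3u)

	struct fw_ie {			/* header layout implied by the parser above */
		uint32_t id;		/* little-endian on disk */
		uint32_t len;		/* payload length, excluding pad bytes */
		uint8_t data[];
	};

	static uint32_t le32(const uint8_t *p)
	{
		return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
		       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
	}

	/* Walk one container image; returns 0 on success, -1 if malformed. */
	static int walk_fw_ies(const uint8_t *data, size_t len)
	{
		size_t magic_len = ALIGN4(strlen(FW_MAGIC) + 1); /* magic + NUL, padded */

		if (len < magic_len || memcmp(data, FW_MAGIC, strlen(FW_MAGIC) + 1))
			return -1;

		data += magic_len;
		len -= magic_len;

		while (len > sizeof(struct fw_ie)) {
			uint32_t id = le32(data);
			uint32_t ie_len = le32(data + 4);

			data += sizeof(struct fw_ie);
			len -= sizeof(struct fw_ie);
			if (ie_len > len)
				return -1;	/* truncated element */

			printf("IE id %u, %u bytes\n", (unsigned)id,
			       (unsigned)ie_len);

			/* skip payload plus pad, as the driver's loop does */
			ie_len = ALIGN4(ie_len);
			if (ie_len > len)
				break;
			data += ie_len;
			len -= ie_len;
		}

		return 0;
	}

The feature bitmap carried in ATH10K_FW_IE_FEATURES is just a little-endian byte array: feature bit i lives in byte i / 8 under mask 1 << (i % 8), which is why the decode loop above stops once index reaches ie_len. ath10k_core_fetch_firmware_files() then simply tries this API-2 container first and falls back to the bare API-1 board/otp/firmware files.
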
*/ +	ret = ath10k_bmi_write32(ar, hi_desired_baud_rate, 19200); +	if (ret) { +		ath10k_warn("could not set the baud rate (%d)\n", ret); +		return ret; +	} +  	ath10k_info("UART prints enabled\n");  	return 0;  } @@ -470,8 +636,8 @@ static int ath10k_init_hw_params(struct ath10k *ar)  	ar->hw_params = *hw_params; -	ath10k_info("Hardware name %s version 0x%x\n", -		    ar->hw_params.name, ar->target_version); +	ath10k_dbg(ATH10K_DBG_BOOT, "Hardware name %s version 0x%x\n", +		   ar->hw_params.name, ar->target_version);  	return 0;  } @@ -484,15 +650,19 @@ static void ath10k_core_restart(struct work_struct *work)  	switch (ar->state) {  	case ATH10K_STATE_ON: -		ath10k_halt(ar);  		ar->state = ATH10K_STATE_RESTARTING; +		del_timer_sync(&ar->scan.timeout); +		ath10k_reset_scan((unsigned long)ar);  		ieee80211_restart_hw(ar->hw);  		break;  	case ATH10K_STATE_OFF: -		/* this can happen if driver is being unloaded */ +		/* this can happen if driver is being unloaded +		 * or if the crash happens during FW probing */  		ath10k_warn("cannot restart a device that hasn't been started\n");  		break;  	case ATH10K_STATE_RESTARTING: +		/* hw restart might be requested from multiple places */ +		break;  	case ATH10K_STATE_RESTARTED:  		ar->state = ATH10K_STATE_WEDGED;  		/* fall through */ @@ -504,72 +674,12 @@ static void ath10k_core_restart(struct work_struct *work)  	mutex_unlock(&ar->conf_mutex);  } -struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev, -				  const struct ath10k_hif_ops *hif_ops) -{ -	struct ath10k *ar; - -	ar = ath10k_mac_create(); -	if (!ar) -		return NULL; - -	ar->ath_common.priv = ar; -	ar->ath_common.hw = ar->hw; - -	ar->p2p = !!ath10k_p2p; -	ar->dev = dev; - -	ar->hif.priv = hif_priv; -	ar->hif.ops = hif_ops; - -	init_completion(&ar->scan.started); -	init_completion(&ar->scan.completed); -	init_completion(&ar->scan.on_channel); - -	init_completion(&ar->install_key_done); -	init_completion(&ar->vdev_setup_done); - -	setup_timer(&ar->scan.timeout, ath10k_reset_scan, (unsigned long)ar); - -	ar->workqueue = create_singlethread_workqueue("ath10k_wq"); -	if (!ar->workqueue) -		goto err_wq; - -	mutex_init(&ar->conf_mutex); -	spin_lock_init(&ar->data_lock); - -	INIT_LIST_HEAD(&ar->peers); -	init_waitqueue_head(&ar->peer_mapping_wq); - -	init_completion(&ar->offchan_tx_completed); -	INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work); -	skb_queue_head_init(&ar->offchan_tx_queue); - -	init_waitqueue_head(&ar->event_queue); - -	INIT_WORK(&ar->restart_work, ath10k_core_restart); - -	return ar; - -err_wq: -	ath10k_mac_destroy(ar); -	return NULL; -} -EXPORT_SYMBOL(ath10k_core_create); - -void ath10k_core_destroy(struct ath10k *ar) -{ -	flush_workqueue(ar->workqueue); -	destroy_workqueue(ar->workqueue); - -	ath10k_mac_destroy(ar); -} -EXPORT_SYMBOL(ath10k_core_destroy); -  int ath10k_core_start(struct ath10k *ar)  {  	int status; +	lockdep_assert_held(&ar->conf_mutex); +  	ath10k_bmi_start(ar);  	if (ath10k_init_configure_target(ar)) { @@ -604,51 +714,116 @@ int ath10k_core_start(struct ath10k *ar)  		goto err;  	} -	status = ath10k_htc_wait_target(&ar->htc); -	if (status) +	status = ath10k_htt_init(ar); +	if (status) { +		ath10k_err("failed to init htt: %d\n", status);  		goto err_wmi_detach; +	} -	status = ath10k_htt_attach(ar); +	status = ath10k_htt_tx_alloc(&ar->htt);  	if (status) { -		ath10k_err("could not attach htt (%d)\n", status); +		ath10k_err("failed to alloc htt tx: %d\n", status);  		goto err_wmi_detach;  	} -	status = ath10k_init_connect_htc(ar); -	if 
(status) -		goto err_htt_detach; +	status = ath10k_htt_rx_alloc(&ar->htt); +	if (status) { +		ath10k_err("failed to alloc htt rx: %d\n", status); +		goto err_htt_tx_detach; +	} -	ath10k_info("firmware %s booted\n", ar->hw->wiphy->fw_version); +	status = ath10k_hif_start(ar); +	if (status) { +		ath10k_err("could not start HIF: %d\n", status); +		goto err_htt_rx_detach; +	} -	status = ath10k_check_fw_version(ar); -	if (status) -		goto err_disconnect_htc; +	status = ath10k_htc_wait_target(&ar->htc); +	if (status) { +		ath10k_err("failed to connect to HTC: %d\n", status); +		goto err_hif_stop; +	} + +	status = ath10k_htt_connect(&ar->htt); +	if (status) { +		ath10k_err("failed to connect htt (%d)\n", status); +		goto err_hif_stop; +	} + +	status = ath10k_wmi_connect(ar); +	if (status) { +		ath10k_err("could not connect wmi: %d\n", status); +		goto err_hif_stop; +	} + +	status = ath10k_htc_start(&ar->htc); +	if (status) { +		ath10k_err("failed to start htc: %d\n", status); +		goto err_hif_stop; +	} + +	status = ath10k_wmi_wait_for_service_ready(ar); +	if (status <= 0) { +		ath10k_warn("wmi service ready event not received"); +		status = -ETIMEDOUT; +		goto err_htc_stop; +	} + +	ath10k_dbg(ATH10K_DBG_BOOT, "firmware %s booted\n", +		   ar->hw->wiphy->fw_version);  	status = ath10k_wmi_cmd_init(ar);  	if (status) {  		ath10k_err("could not send WMI init command (%d)\n", status); -		goto err_disconnect_htc; +		goto err_htc_stop;  	}  	status = ath10k_wmi_wait_for_unified_ready(ar);  	if (status <= 0) {  		ath10k_err("wmi unified ready event not received\n");  		status = -ETIMEDOUT; -		goto err_disconnect_htc; +		goto err_htc_stop; +	} + +	status = ath10k_htt_setup(&ar->htt); +	if (status) { +		ath10k_err("failed to setup htt: %d\n", status); +		goto err_htc_stop;  	} -	status = ath10k_htt_attach_target(&ar->htt); +	status = ath10k_debug_start(ar);  	if (status) -		goto err_disconnect_htc; +		goto err_htc_stop; + +	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) +		ar->free_vdev_map = (1 << TARGET_10X_NUM_VDEVS) - 1; +	else +		ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1; + +	INIT_LIST_HEAD(&ar->arvifs); -	ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1; +	if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags)) +		ath10k_info("%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d\n", +			    ar->hw_params.name, +			    ar->target_version, +			    ar->chip_id, +			    ar->hw->wiphy->fw_version, +			    ar->fw_api, +			    ar->htt.target_version_major, +			    ar->htt.target_version_minor); + +	__set_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags);  	return 0; -err_disconnect_htc: +err_htc_stop:  	ath10k_htc_stop(&ar->htc); -err_htt_detach: -	ath10k_htt_detach(&ar->htt); +err_hif_stop: +	ath10k_hif_stop(ar); +err_htt_rx_detach: +	ath10k_htt_rx_free(&ar->htt); +err_htt_tx_detach: +	ath10k_htt_tx_free(&ar->htt);  err_wmi_detach:  	ath10k_wmi_detach(ar);  err: @@ -656,10 +831,41 @@ err:  }  EXPORT_SYMBOL(ath10k_core_start); +int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt) +{ +	int ret; + +	reinit_completion(&ar->target_suspend); + +	ret = ath10k_wmi_pdev_suspend_target(ar, suspend_opt); +	if (ret) { +		ath10k_warn("could not suspend target (%d)\n", ret); +		return ret; +	} + +	ret = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ); + +	if (ret == 0) { +		ath10k_warn("suspend timed out - target pause event never came\n"); +		return -ETIMEDOUT; +	} + +	return 0; +} +  void ath10k_core_stop(struct ath10k *ar)  { +	lockdep_assert_held(&ar->conf_mutex); + +	/* try to suspend 
target */ +	if (ar->state != ATH10K_STATE_RESTARTING) +		ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR); + +	ath10k_debug_stop(ar);  	ath10k_htc_stop(&ar->htc); -	ath10k_htt_detach(&ar->htt); +	ath10k_hif_stop(ar); +	ath10k_htt_tx_free(&ar->htt); +	ath10k_htt_rx_free(&ar->htt);  	ath10k_wmi_detach(ar);  }  EXPORT_SYMBOL(ath10k_core_stop); @@ -704,27 +910,62 @@ static int ath10k_core_probe_fw(struct ath10k *ar)  		return ret;  	} +	mutex_lock(&ar->conf_mutex); +  	ret = ath10k_core_start(ar);  	if (ret) {  		ath10k_err("could not init core (%d)\n", ret);  		ath10k_core_free_firmware_files(ar);  		ath10k_hif_power_down(ar); +		mutex_unlock(&ar->conf_mutex);  		return ret;  	}  	ath10k_core_stop(ar); + +	mutex_unlock(&ar->conf_mutex); +  	ath10k_hif_power_down(ar);  	return 0;  } -int ath10k_core_register(struct ath10k *ar) +static int ath10k_core_check_chip_id(struct ath10k *ar) +{ +	u32 hw_revision = MS(ar->chip_id, SOC_CHIP_ID_REV); + +	ath10k_dbg(ATH10K_DBG_BOOT, "boot chip_id 0x%08x hw_revision 0x%x\n", +		   ar->chip_id, hw_revision); + +	/* Check that we are not using hw1.0 (some of them have same pci id +	 * as hw2.0) before doing anything else as ath10k crashes horribly +	 * due to missing hw1.0 workarounds. */ +	switch (hw_revision) { +	case QCA988X_HW_1_0_CHIP_ID_REV: +		ath10k_err("ERROR: qca988x hw1.0 is not supported\n"); +		return -EOPNOTSUPP; + +	case QCA988X_HW_2_0_CHIP_ID_REV: +		/* known hardware revision, continue normally */ +		return 0; + +	default: +		ath10k_warn("Warning: hardware revision unknown (0x%x), expect problems\n", +			    ar->chip_id); +		return 0; +	} + +	return 0; +} + +static void ath10k_core_register_work(struct work_struct *work)  { +	struct ath10k *ar = container_of(work, struct ath10k, register_work);  	int status;  	status = ath10k_core_probe_fw(ar);  	if (status) {  		ath10k_err("could not probe fw (%d)\n", status); -		return status; +		goto err;  	}  	status = ath10k_mac_register(ar); @@ -739,26 +980,119 @@ int ath10k_core_register(struct ath10k *ar)  		goto err_unregister_mac;  	} -	return 0; +	set_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags); +	return;  err_unregister_mac:  	ath10k_mac_unregister(ar);  err_release_fw:  	ath10k_core_free_firmware_files(ar); -	return status; +err: +	device_release_driver(ar->dev); +	return; +} + +int ath10k_core_register(struct ath10k *ar, u32 chip_id) +{ +	int status; + +	ar->chip_id = chip_id; + +	status = ath10k_core_check_chip_id(ar); +	if (status) { +		ath10k_err("Unsupported chip id 0x%08x\n", ar->chip_id); +		return status; +	} + +	queue_work(ar->workqueue, &ar->register_work); + +	return 0;  }  EXPORT_SYMBOL(ath10k_core_register);  void ath10k_core_unregister(struct ath10k *ar)  { +	cancel_work_sync(&ar->register_work); + +	if (!test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) +		return; +  	/* We must unregister from mac80211 before we stop HTC and HIF.  	 * Otherwise we will fail to submit commands to FW and mac80211 will be  	 * unhappy about callback failures. 
*/  	ath10k_mac_unregister(ar); +  	ath10k_core_free_firmware_files(ar); + +	ath10k_debug_destroy(ar);  }  EXPORT_SYMBOL(ath10k_core_unregister); +struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev, +				  const struct ath10k_hif_ops *hif_ops) +{ +	struct ath10k *ar; + +	ar = ath10k_mac_create(); +	if (!ar) +		return NULL; + +	ar->ath_common.priv = ar; +	ar->ath_common.hw = ar->hw; + +	ar->p2p = !!ath10k_p2p; +	ar->dev = dev; + +	ar->hif.priv = hif_priv; +	ar->hif.ops = hif_ops; + +	init_completion(&ar->scan.started); +	init_completion(&ar->scan.completed); +	init_completion(&ar->scan.on_channel); +	init_completion(&ar->target_suspend); + +	init_completion(&ar->install_key_done); +	init_completion(&ar->vdev_setup_done); + +	setup_timer(&ar->scan.timeout, ath10k_reset_scan, (unsigned long)ar); + +	ar->workqueue = create_singlethread_workqueue("ath10k_wq"); +	if (!ar->workqueue) +		goto err_wq; + +	mutex_init(&ar->conf_mutex); +	spin_lock_init(&ar->data_lock); + +	INIT_LIST_HEAD(&ar->peers); +	init_waitqueue_head(&ar->peer_mapping_wq); + +	init_completion(&ar->offchan_tx_completed); +	INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work); +	skb_queue_head_init(&ar->offchan_tx_queue); + +	INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work); +	skb_queue_head_init(&ar->wmi_mgmt_tx_queue); + +	INIT_WORK(&ar->register_work, ath10k_core_register_work); +	INIT_WORK(&ar->restart_work, ath10k_core_restart); + +	return ar; + +err_wq: +	ath10k_mac_destroy(ar); +	return NULL; +} +EXPORT_SYMBOL(ath10k_core_create); + +void ath10k_core_destroy(struct ath10k *ar) +{ +	flush_workqueue(ar->workqueue); +	destroy_workqueue(ar->workqueue); + +	ath10k_mac_destroy(ar); +} +EXPORT_SYMBOL(ath10k_core_destroy); +  MODULE_AUTHOR("Qualcomm Atheros");  MODULE_DESCRIPTION("Core module for QCA988X PCIe devices.");  MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index e4bba563ed4..68ceef61933 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -30,6 +30,7 @@  #include "wmi.h"  #include "../ath.h"  #include "../regd.h" +#include "../dfs_pattern_detector.h"  #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)  #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) @@ -43,27 +44,37 @@  /* Antenna noise floor */  #define ATH10K_DEFAULT_NOISE_FLOOR -95 +#define ATH10K_MAX_NUM_MGMT_PENDING 128 + +/* number of failed packets */ +#define ATH10K_KICKOUT_THRESHOLD 50 + +/* + * Use insanely high numbers to make sure that the firmware implementation + * won't start, we have the same functionality already in hostapd. Unit + * is seconds. 
+ */ +#define ATH10K_KEEPALIVE_MIN_IDLE 3747 +#define ATH10K_KEEPALIVE_MAX_IDLE 3895 +#define ATH10K_KEEPALIVE_MAX_UNRESPONSIVE 3900 +  struct ath10k;  struct ath10k_skb_cb {  	dma_addr_t paddr; -	bool is_mapped; -	bool is_aborted; +	u8 vdev_id;  	struct { -		u8 vdev_id; -		u16 msdu_id;  		u8 tid;  		bool is_offchan; -		bool is_conf; -		bool discard; -		bool no_ack; -		u8 refcount; -		struct sk_buff *txfrag; -		struct sk_buff *msdu; +		struct ath10k_htt_txbuf *txbuf; +		u32 txbuf_paddr;  	} __packed htt; -	/* 4 bytes left on 64bit arch */ +	struct { +		bool dtim_zero; +		bool deliver_cab; +	} bcn;  } __packed;  static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb) @@ -73,32 +84,6 @@ static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)  	return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;  } -static inline int ath10k_skb_map(struct device *dev, struct sk_buff *skb) -{ -	if (ATH10K_SKB_CB(skb)->is_mapped) -		return -EINVAL; - -	ATH10K_SKB_CB(skb)->paddr = dma_map_single(dev, skb->data, skb->len, -						   DMA_TO_DEVICE); - -	if (unlikely(dma_mapping_error(dev, ATH10K_SKB_CB(skb)->paddr))) -		return -EIO; - -	ATH10K_SKB_CB(skb)->is_mapped = true; -	return 0; -} - -static inline int ath10k_skb_unmap(struct device *dev, struct sk_buff *skb) -{ -	if (!ATH10K_SKB_CB(skb)->is_mapped) -		return -EINVAL; - -	dma_unmap_single(dev, ATH10K_SKB_CB(skb)->paddr, skb->len, -			 DMA_TO_DEVICE); -	ATH10K_SKB_CB(skb)->is_mapped = false; -	return 0; -} -  static inline u32 host_interest_item_address(u32 item_offset)  {  	return QCA988X_HOST_INTEREST_ADDRESS + item_offset; @@ -108,21 +93,33 @@ struct ath10k_bmi {  	bool done_sent;  }; +#define ATH10K_MAX_MEM_REQS 16 + +struct ath10k_mem_chunk { +	void *vaddr; +	dma_addr_t paddr; +	u32 len; +	u32 req_id; +}; +  struct ath10k_wmi {  	enum ath10k_htc_ep_id eid;  	struct completion service_ready;  	struct completion unified_ready; -	atomic_t pending_tx_count; -	wait_queue_head_t wq; +	wait_queue_head_t tx_credits_wq; +	struct wmi_cmd_map *cmd; +	struct wmi_vdev_param_map *vdev_param; +	struct wmi_pdev_param_map *pdev_param; -	struct sk_buff_head wmi_event_list; -	struct work_struct wmi_event_work; +	u32 num_mem_chunks; +	struct ath10k_mem_chunk mem_chunks[ATH10K_MAX_MEM_REQS];  };  struct ath10k_peer_stat {  	u8 peer_macaddr[ETH_ALEN];  	u32 peer_rssi;  	u32 peer_tx_rate; +	u32 peer_rx_rate; /* 10x only */  };  struct ath10k_target_stats { @@ -134,6 +131,12 @@ struct ath10k_target_stats {  	u32 cycle_count;  	u32 phy_err_count;  	u32 chan_tx_power; +	u32 ack_rx_bad; +	u32 rts_bad; +	u32 rts_good; +	u32 fcs_bad; +	u32 no_beacons; +	u32 mib_int_count;  	/* PDEV TX stats */  	s32 comp_queued; @@ -185,6 +188,14 @@ struct ath10k_target_stats {  }; +struct ath10k_dfs_stats { +	u32 phy_errors; +	u32 pulses_total; +	u32 pulses_detected; +	u32 pulses_discarded; +	u32 radar_detected; +}; +  #define ATH10K_MAX_NUM_PEER_IDS (1 << 11) /* htt rx_desc limit */  struct ath10k_peer { @@ -195,26 +206,49 @@ struct ath10k_peer {  	struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];  }; +struct ath10k_sta { +	struct ath10k_vif *arvif; + +	/* the following are protected by ar->data_lock */ +	u32 changed; /* IEEE80211_RC_* */ +	u32 bw; +	u32 nss; +	u32 smps; + +	struct work_struct update_wk; +}; +  #define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)  struct ath10k_vif { +	struct list_head list; +  	u32 vdev_id;  	enum wmi_vdev_type vdev_type;  	enum wmi_vdev_subtype vdev_subtype;  	u32 beacon_interval;  	u32 dtim_period; +	struct sk_buff 
*beacon; +	/* protected by data_lock */ +	bool beacon_sent;  	struct ath10k *ar;  	struct ieee80211_vif *vif; +	bool is_started; +	bool is_up; +	u32 aid; +	u8 bssid[ETH_ALEN]; + +	struct work_struct wep_key_work;  	struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1]; -	u8 def_wep_key_index; +	u8 def_wep_key_idx; +	u8 def_wep_key_newidx;  	u16 tx_seq_no;  	union {  		struct { -			u8 bssid[ETH_ALEN];  			u32 uapsd;  		} sta;  		struct { @@ -228,10 +262,13 @@ struct ath10k_vif {  			u32 noa_len;  			u8 *noa_data;  		} ap; -		struct { -			u8 bssid[ETH_ALEN]; -		} ibss;  	} u; + +	u8 fixed_rate; +	u8 fixed_nss; +	u8 force_sgi; +	bool use_cts_prot; +	int num_legacy_stations;  };  struct ath10k_vif_iter { @@ -246,6 +283,13 @@ struct ath10k_debug {  	u32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];  	struct completion event_stats_compl; + +	unsigned long htt_stats_mask; +	struct delayed_work htt_stats_dwork; +	struct ath10k_dfs_stats dfs_stats; +	struct ath_dfs_pool_stats dfs_pool_stats; + +	u32 fw_dbglog_mask;  };  enum ath10k_state { @@ -270,12 +314,37 @@ enum ath10k_state {  	ATH10K_STATE_WEDGED,  }; +enum ath10k_fw_features { +	/* wmi_mgmt_rx_hdr contains extra RSSI information */ +	ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX = 0, + +	/* firmware from 10X branch */ +	ATH10K_FW_FEATURE_WMI_10X = 1, + +	/* firmware support tx frame management over WMI, otherwise it's HTT */ +	ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX = 2, + +	/* Firmware does not support P2P */ +	ATH10K_FW_FEATURE_NO_P2P = 3, + +	/* keep last */ +	ATH10K_FW_FEATURE_COUNT, +}; + +enum ath10k_dev_flags { +	/* Indicates that ath10k device is during CAC phase of DFS */ +	ATH10K_CAC_RUNNING, +	ATH10K_FLAG_FIRST_BOOT_DONE, +	ATH10K_FLAG_CORE_REGISTERED, +}; +  struct ath10k {  	struct ath_common ath_common;  	struct ieee80211_hw *hw;  	struct device *dev;  	u8 mac_addr[ETH_ALEN]; +	u32 chip_id;  	u32 target_version;  	u8 fw_version_major;  	u32 fw_version_minor; @@ -288,6 +357,8 @@ struct ath10k {  	u32 vht_cap_info;  	u32 num_rf_chains; +	DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT); +  	struct targetdef *targetdef;  	struct hostdef *hostdef; @@ -298,8 +369,7 @@ struct ath10k {  		const struct ath10k_hif_ops *ops;  	} hif; -	wait_queue_head_t event_queue; -	bool is_target_paused; +	struct completion target_suspend;  	struct ath10k_bmi bmi;  	struct ath10k_wmi wmi; @@ -319,9 +389,19 @@ struct ath10k {  		} fw;  	} hw_params; -	const struct firmware *board_data; +	const struct firmware *board; +	const void *board_data; +	size_t board_len; +  	const struct firmware *otp; +	const void *otp_data; +	size_t otp_len; +  	const struct firmware *firmware; +	const void *firmware_data; +	size_t firmware_len; + +	int fw_api;  	struct {  		struct completion started; @@ -345,11 +425,27 @@ struct ath10k {  	/* valid during scan; needed for mgmt rx during scan */  	struct ieee80211_channel *scan_channel; +	/* current operating channel definition */ +	struct cfg80211_chan_def chandef; +  	int free_vdev_map; +	bool promisc; +	bool monitor;  	int monitor_vdev_id; -	bool monitor_enabled; -	bool monitor_present; +	bool monitor_started;  	unsigned int filter_flags; +	unsigned long dev_flags; +	u32 dfs_block_radar_events; + +	/* protected by conf_mutex */ +	bool radar_enabled; +	int num_started_vdevs; + +	/* Protected by conf-mutex */ +	u8 supp_tx_chainmask; +	u8 supp_rx_chainmask; +	u8 cfg_tx_chainmask; +	u8 cfg_rx_chainmask;  	struct wmi_pdev_set_wmm_params_arg wmm_params;  	struct completion install_key_done; @@ -364,16 +460,24 @@ struct ath10k {  	/* protects 
shared structure data */  	spinlock_t data_lock; +	struct list_head arvifs;  	struct list_head peers;  	wait_queue_head_t peer_mapping_wq; +	/* number of created peers; protected by data_lock */ +	int num_peers; +  	struct work_struct offchan_tx_work;  	struct sk_buff_head offchan_tx_queue;  	struct completion offchan_tx_completed;  	struct sk_buff *offchan_tx_skb; +	struct work_struct wmi_mgmt_tx_work; +	struct sk_buff_head wmi_mgmt_tx_queue; +  	enum ath10k_state state; +	struct work_struct register_work;  	struct work_struct restart_work;  	/* cycle count is reported twice for each visited channel during scan. @@ -382,6 +486,8 @@ struct ath10k {  	u32 survey_last_cycle_count;  	struct survey_info survey[ATH10K_NUM_CHANS]; +	struct dfs_pattern_detector *dfs_detector; +  #ifdef CONFIG_ATH10K_DEBUGFS  	struct ath10k_debug debug;  #endif @@ -392,8 +498,9 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,  void ath10k_core_destroy(struct ath10k *ar);  int ath10k_core_start(struct ath10k *ar); +int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);  void ath10k_core_stop(struct ath10k *ar); -int ath10k_core_register(struct ath10k *ar); +int ath10k_core_register(struct ath10k *ar, u32 chip_id);  void ath10k_core_unregister(struct ath10k *ar);  #endif /* _CORE_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 3d65594fa09..1b7ff4ba122 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -21,6 +21,9 @@  #include "core.h"  #include "debug.h" +/* ms */ +#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000 +  static int ath10k_printk(const char *level, const char *fmt, ...)  {  	struct va_format vaf; @@ -158,7 +161,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,  	u8 *tmp = ev->data;  	struct ath10k_target_stats *stats;  	int num_pdev_stats, num_vdev_stats, num_peer_stats; -	struct wmi_pdev_stats *ps; +	struct wmi_pdev_stats_10x *ps;  	int i;  	spin_lock_bh(&ar->data_lock); @@ -170,7 +173,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,  	num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */  	if (num_pdev_stats) { -		ps = (struct wmi_pdev_stats *)tmp; +		ps = (struct wmi_pdev_stats_10x *)tmp;  		stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf);  		stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count); @@ -225,7 +228,18 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,  		stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop);  		stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs); -		tmp += sizeof(struct wmi_pdev_stats); +		if (test_bit(ATH10K_FW_FEATURE_WMI_10X, +			     ar->fw_features)) { +			stats->ack_rx_bad = __le32_to_cpu(ps->ack_rx_bad); +			stats->rts_bad = __le32_to_cpu(ps->rts_bad); +			stats->rts_good = __le32_to_cpu(ps->rts_good); +			stats->fcs_bad = __le32_to_cpu(ps->fcs_bad); +			stats->no_beacons = __le32_to_cpu(ps->no_beacons); +			stats->mib_int_count = __le32_to_cpu(ps->mib_int_count); +			tmp += sizeof(struct wmi_pdev_stats_10x); +		} else { +			tmp += sizeof(struct wmi_pdev_stats_old); +		}  	}  	/* 0 or max vdevs */ @@ -240,27 +254,33 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,  	}  	if (num_peer_stats) { -		struct wmi_peer_stats *peer_stats; +		struct wmi_peer_stats_10x *peer_stats;  		struct ath10k_peer_stat *s;  		stats->peers = num_peer_stats;  		for (i = 0; i < num_peer_stats; i++) { -			peer_stats = (struct wmi_peer_stats *)tmp; +			peer_stats = (struct 
wmi_peer_stats_10x *)tmp;  			s = &stats->peer_stat[i]; -			WMI_MAC_ADDR_TO_CHAR_ARRAY(&peer_stats->peer_macaddr, -						   s->peer_macaddr); +			memcpy(s->peer_macaddr, &peer_stats->peer_macaddr.addr, +			       ETH_ALEN);  			s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi);  			s->peer_tx_rate =  				__le32_to_cpu(peer_stats->peer_tx_rate); - -			tmp += sizeof(struct wmi_peer_stats); +			if (test_bit(ATH10K_FW_FEATURE_WMI_10X, +				     ar->fw_features)) { +				s->peer_rx_rate = +					__le32_to_cpu(peer_stats->peer_rx_rate); +				tmp += sizeof(struct wmi_peer_stats_10x); + +			} else { +				tmp += sizeof(struct wmi_peer_stats_old); +			}  		}  	}  	spin_unlock_bh(&ar->data_lock); -	mutex_unlock(&ar->conf_mutex);  	complete(&ar->debug.event_stats_compl);  } @@ -270,7 +290,7 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,  	struct ath10k *ar = file->private_data;  	struct ath10k_target_stats *fw_stats;  	char *buf = NULL; -	unsigned int len = 0, buf_len = 2500; +	unsigned int len = 0, buf_len = 8000;  	ssize_t ret_cnt = 0;  	long left;  	int i; @@ -318,6 +338,16 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,  			 "Cycle count", fw_stats->cycle_count);  	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",  			 "PHY error count", fw_stats->phy_err_count); +	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", +			 "RTS bad count", fw_stats->rts_bad); +	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", +			 "RTS good count", fw_stats->rts_good); +	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", +			 "FCS bad count", fw_stats->fcs_bad); +	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", +			 "No beacon count", fw_stats->no_beacons); +	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", +			 "MIB int count", fw_stats->mib_int_count);  	len += scnprintf(buf + len, buf_len - len, "\n");  	len += scnprintf(buf + len, buf_len - len, "%30s\n", @@ -409,8 +439,8 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,  			 "MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs);  	len += scnprintf(buf + len, buf_len - len, "\n"); -	len += scnprintf(buf + len, buf_len - len, "%30s\n", -			 "ath10k PEER stats"); +	len += scnprintf(buf + len, buf_len - len, "%30s (%d)\n", +			 "ath10k PEER stats", fw_stats->peers);  	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",  				 "================="); @@ -423,6 +453,9 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,  		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",  				 "Peer TX rate",  				 fw_stats->peer_stat[i].peer_tx_rate); +		len += scnprintf(buf + len, buf_len - len, "%30s %u\n", +				 "Peer RX rate", +				 fw_stats->peer_stat[i].peer_rx_rate);  		len += scnprintf(buf + len, buf_len - len, "\n");  	}  	spin_unlock_bh(&ar->data_lock); @@ -449,27 +482,37 @@ static ssize_t ath10k_read_simulate_fw_crash(struct file *file,  					     char __user *user_buf,  					     size_t count, loff_t *ppos)  { -	const char buf[] = "To simulate firmware crash write the keyword" -			   " `crash` to this file.\nThis will force firmware" -			   " to report a crash to the host system.\n"; +	const char buf[] = "To simulate firmware crash write one of the" +			   " keywords to this file:\n `soft` - this will send" +			   " WMI_FORCE_FW_HANG_ASSERT to firmware if FW" +			   " supports that command.\n `hard` - this will send" +			   " to firmware command with illegal parameters" +			   " causing 
firmware crash.\n"; +  	return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));  } +/* Simulate firmware crash: + * 'soft': Call wmi command causing firmware hang. This firmware hang is + * recoverable by warm firmware reset. + * 'hard': Force firmware crash by setting any vdev parameter for not allowed + * vdev id. This is hard firmware crash because it is recoverable only by cold + * firmware reset. + */  static ssize_t ath10k_write_simulate_fw_crash(struct file *file,  					      const char __user *user_buf,  					      size_t count, loff_t *ppos)  {  	struct ath10k *ar = file->private_data; -	char buf[32] = {}; +	char buf[32];  	int ret;  	mutex_lock(&ar->conf_mutex);  	simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); -	if (strcmp(buf, "crash") && strcmp(buf, "crash\n")) { -		ret = -EINVAL; -		goto exit; -	} + +	/* make sure that buf is null terminated */ +	buf[sizeof(buf) - 1] = 0;  	if (ar->state != ATH10K_STATE_ON &&  	    ar->state != ATH10K_STATE_RESTARTED) { @@ -477,14 +520,30 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file,  		goto exit;  	} -	ath10k_info("simulating firmware crash\n"); +	/* drop the possible '\n' from the end */ +	if (buf[count - 1] == '\n') { +		buf[count - 1] = 0; +		count--; +	} -	ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0); -	if (ret) -		ath10k_warn("failed to force fw hang (%d)\n", ret); +	if (!strcmp(buf, "soft")) { +		ath10k_info("simulating soft firmware crash\n"); +		ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0); +	} else if (!strcmp(buf, "hard")) { +		ath10k_info("simulating hard firmware crash\n"); +		ret = ath10k_wmi_vdev_set_param(ar, TARGET_NUM_VDEVS + 1, +					ar->wmi.vdev_param->rts_threshold, 0); +	} else { +		ret = -EINVAL; +		goto exit; +	} + +	if (ret) { +		ath10k_warn("failed to simulate firmware crash: %d\n", ret); +		goto exit; +	} -	if (ret == 0) -		ret = count; +	ret = count;  exit:  	mutex_unlock(&ar->conf_mutex); @@ -499,6 +558,287 @@ static const struct file_operations fops_simulate_fw_crash = {  	.llseek = default_llseek,  }; +static ssize_t ath10k_read_chip_id(struct file *file, char __user *user_buf, +				   size_t count, loff_t *ppos) +{ +	struct ath10k *ar = file->private_data; +	unsigned int len; +	char buf[50]; + +	len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->chip_id); + +	return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_chip_id = { +	.read = ath10k_read_chip_id, +	.open = simple_open, +	.owner = THIS_MODULE, +	.llseek = default_llseek, +}; + +static int ath10k_debug_htt_stats_req(struct ath10k *ar) +{ +	u64 cookie; +	int ret; + +	lockdep_assert_held(&ar->conf_mutex); + +	if (ar->debug.htt_stats_mask == 0) +		/* htt stats are disabled */ +		return 0; + +	if (ar->state != ATH10K_STATE_ON) +		return 0; + +	cookie = get_jiffies_64(); + +	ret = ath10k_htt_h2t_stats_req(&ar->htt, ar->debug.htt_stats_mask, +				       cookie); +	if (ret) { +		ath10k_warn("failed to send htt stats request: %d\n", ret); +		return ret; +	} + +	queue_delayed_work(ar->workqueue, &ar->debug.htt_stats_dwork, +			   msecs_to_jiffies(ATH10K_DEBUG_HTT_STATS_INTERVAL)); + +	return 0; +} + +static void ath10k_debug_htt_stats_dwork(struct work_struct *work) +{ +	struct ath10k *ar = container_of(work, struct ath10k, +					 debug.htt_stats_dwork.work); + +	mutex_lock(&ar->conf_mutex); + +	ath10k_debug_htt_stats_req(ar); + +	mutex_unlock(&ar->conf_mutex); +} + +static ssize_t 
ath10k_read_htt_stats_mask(struct file *file, +					    char __user *user_buf, +					    size_t count, loff_t *ppos) +{ +	struct ath10k *ar = file->private_data; +	char buf[32]; +	unsigned int len; + +	len = scnprintf(buf, sizeof(buf), "%lu\n", ar->debug.htt_stats_mask); + +	return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t ath10k_write_htt_stats_mask(struct file *file, +					     const char __user *user_buf, +					     size_t count, loff_t *ppos) +{ +	struct ath10k *ar = file->private_data; +	unsigned long mask; +	int ret; + +	ret = kstrtoul_from_user(user_buf, count, 0, &mask); +	if (ret) +		return ret; + +	/* max 8 bit masks (for now) */ +	if (mask > 0xff) +		return -E2BIG; + +	mutex_lock(&ar->conf_mutex); + +	ar->debug.htt_stats_mask = mask; + +	ret = ath10k_debug_htt_stats_req(ar); +	if (ret) +		goto out; + +	ret = count; + +out: +	mutex_unlock(&ar->conf_mutex); + +	return ret; +} + +static const struct file_operations fops_htt_stats_mask = { +	.read = ath10k_read_htt_stats_mask, +	.write = ath10k_write_htt_stats_mask, +	.open = simple_open, +	.owner = THIS_MODULE, +	.llseek = default_llseek, +}; + +static ssize_t ath10k_read_fw_dbglog(struct file *file, +					    char __user *user_buf, +					    size_t count, loff_t *ppos) +{ +	struct ath10k *ar = file->private_data; +	unsigned int len; +	char buf[32]; + +	len = scnprintf(buf, sizeof(buf), "0x%08x\n", +			ar->debug.fw_dbglog_mask); + +	return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t ath10k_write_fw_dbglog(struct file *file, +				      const char __user *user_buf, +				      size_t count, loff_t *ppos) +{ +	struct ath10k *ar = file->private_data; +	unsigned long mask; +	int ret; + +	ret = kstrtoul_from_user(user_buf, count, 0, &mask); +	if (ret) +		return ret; + +	mutex_lock(&ar->conf_mutex); + +	ar->debug.fw_dbglog_mask = mask; + +	if (ar->state == ATH10K_STATE_ON) { +		ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask); +		if (ret) { +			ath10k_warn("dbglog cfg failed from debugfs: %d\n", +				    ret); +			goto exit; +		} +	} + +	ret = count; + +exit: +	mutex_unlock(&ar->conf_mutex); + +	return ret; +} + +static const struct file_operations fops_fw_dbglog = { +	.read = ath10k_read_fw_dbglog, +	.write = ath10k_write_fw_dbglog, +	.open = simple_open, +	.owner = THIS_MODULE, +	.llseek = default_llseek, +}; + +int ath10k_debug_start(struct ath10k *ar) +{ +	int ret; + +	lockdep_assert_held(&ar->conf_mutex); + +	ret = ath10k_debug_htt_stats_req(ar); +	if (ret) +		/* continue normally anyway, this isn't serious */ +		ath10k_warn("failed to start htt stats workqueue: %d\n", ret); + +	if (ar->debug.fw_dbglog_mask) { +		ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask); +		if (ret) +			/* not serious */ +			ath10k_warn("failed to enable dbglog during start: %d", +				    ret); +	} + +	return 0; +} + +void ath10k_debug_stop(struct ath10k *ar) +{ +	lockdep_assert_held(&ar->conf_mutex); + +	/* Must not use _sync to avoid deadlock, we do that in +	 * ath10k_debug_destroy(). The check for htt_stats_mask is to avoid +	 * warning from del_timer(). 
*/ +	if (ar->debug.htt_stats_mask != 0) +		cancel_delayed_work(&ar->debug.htt_stats_dwork); +} + +static ssize_t ath10k_write_simulate_radar(struct file *file, +					   const char __user *user_buf, +					   size_t count, loff_t *ppos) +{ +	struct ath10k *ar = file->private_data; + +	ieee80211_radar_detected(ar->hw); + +	return count; +} + +static const struct file_operations fops_simulate_radar = { +	.write = ath10k_write_simulate_radar, +	.open = simple_open, +	.owner = THIS_MODULE, +	.llseek = default_llseek, +}; + +#define ATH10K_DFS_STAT(s, p) (\ +	len += scnprintf(buf + len, size - len, "%-28s : %10u\n", s, \ +			 ar->debug.dfs_stats.p)) + +#define ATH10K_DFS_POOL_STAT(s, p) (\ +	len += scnprintf(buf + len, size - len, "%-28s : %10u\n", s, \ +			 ar->debug.dfs_pool_stats.p)) + +static ssize_t ath10k_read_dfs_stats(struct file *file, char __user *user_buf, +				     size_t count, loff_t *ppos) +{ +	int retval = 0, len = 0; +	const int size = 8000; +	struct ath10k *ar = file->private_data; +	char *buf; + +	buf = kzalloc(size, GFP_KERNEL); +	if (buf == NULL) +		return -ENOMEM; + +	if (!ar->dfs_detector) { +		len += scnprintf(buf + len, size - len, "DFS not enabled\n"); +		goto exit; +	} + +	ar->debug.dfs_pool_stats = +			ar->dfs_detector->get_stats(ar->dfs_detector); + +	len += scnprintf(buf + len, size - len, "Pulse detector statistics:\n"); + +	ATH10K_DFS_STAT("reported phy errors", phy_errors); +	ATH10K_DFS_STAT("pulse events reported", pulses_total); +	ATH10K_DFS_STAT("DFS pulses detected", pulses_detected); +	ATH10K_DFS_STAT("DFS pulses discarded", pulses_discarded); +	ATH10K_DFS_STAT("Radars detected", radar_detected); + +	len += scnprintf(buf + len, size - len, "Global Pool statistics:\n"); +	ATH10K_DFS_POOL_STAT("Pool references", pool_reference); +	ATH10K_DFS_POOL_STAT("Pulses allocated", pulse_allocated); +	ATH10K_DFS_POOL_STAT("Pulses alloc error", pulse_alloc_error); +	ATH10K_DFS_POOL_STAT("Pulses in use", pulse_used); +	ATH10K_DFS_POOL_STAT("Seqs. allocated", pseq_allocated); +	ATH10K_DFS_POOL_STAT("Seqs. alloc error", pseq_alloc_error); +	ATH10K_DFS_POOL_STAT("Seqs. 
in use", pseq_used); + +exit: +	if (len > size) +		len = size; + +	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); +	kfree(buf); + +	return retval; +} + +static const struct file_operations fops_dfs_stats = { +	.read = ath10k_read_dfs_stats, +	.open = simple_open, +	.owner = THIS_MODULE, +	.llseek = default_llseek, +}; +  int ath10k_debug_create(struct ath10k *ar)  {  	ar->debug.debugfs_phy = debugfs_create_dir("ath10k", @@ -507,6 +847,9 @@ int ath10k_debug_create(struct ath10k *ar)  	if (!ar->debug.debugfs_phy)  		return -ENOMEM; +	INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork, +			  ath10k_debug_htt_stats_dwork); +  	init_completion(&ar->debug.event_stats_compl);  	debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar, @@ -518,8 +861,37 @@ int ath10k_debug_create(struct ath10k *ar)  	debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy,  			    ar, &fops_simulate_fw_crash); +	debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy, +			    ar, &fops_chip_id); + +	debugfs_create_file("htt_stats_mask", S_IRUSR, ar->debug.debugfs_phy, +			    ar, &fops_htt_stats_mask); + +	debugfs_create_file("fw_dbglog", S_IRUSR, ar->debug.debugfs_phy, +			    ar, &fops_fw_dbglog); + +	if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) { +		debugfs_create_file("dfs_simulate_radar", S_IWUSR, +				    ar->debug.debugfs_phy, ar, +				    &fops_simulate_radar); + +		debugfs_create_bool("dfs_block_radar_events", S_IWUSR, +				    ar->debug.debugfs_phy, +				    &ar->dfs_block_radar_events); + +		debugfs_create_file("dfs_stats", S_IRUSR, +				    ar->debug.debugfs_phy, ar, +				    &fops_dfs_stats); +	} +  	return 0;  } + +void ath10k_debug_destroy(struct ath10k *ar) +{ +	cancel_delayed_work_sync(&ar->debug.htt_stats_dwork); +} +  #endif /* CONFIG_ATH10K_DEBUGFS */  #ifdef CONFIG_ATH10K_DEBUG diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h index 168140c5402..a5824990bd2 100644 --- a/drivers/net/wireless/ath/ath10k/debug.h +++ b/drivers/net/wireless/ath/ath10k/debug.h @@ -27,34 +27,54 @@ enum ath10k_debug_mask {  	ATH10K_DBG_HTC		= 0x00000004,  	ATH10K_DBG_HTT		= 0x00000008,  	ATH10K_DBG_MAC		= 0x00000010, -	ATH10K_DBG_CORE		= 0x00000020, +	ATH10K_DBG_BOOT		= 0x00000020,  	ATH10K_DBG_PCI_DUMP	= 0x00000040,  	ATH10K_DBG_HTT_DUMP	= 0x00000080,  	ATH10K_DBG_MGMT		= 0x00000100,  	ATH10K_DBG_DATA		= 0x00000200, +	ATH10K_DBG_BMI		= 0x00000400, +	ATH10K_DBG_REGULATORY	= 0x00000800,  	ATH10K_DBG_ANY		= 0xffffffff,  };  extern unsigned int ath10k_debug_mask; -extern __printf(1, 2) int ath10k_info(const char *fmt, ...); -extern __printf(1, 2) int ath10k_err(const char *fmt, ...); -extern __printf(1, 2) int ath10k_warn(const char *fmt, ...); +__printf(1, 2) int ath10k_info(const char *fmt, ...); +__printf(1, 2) int ath10k_err(const char *fmt, ...); +__printf(1, 2) int ath10k_warn(const char *fmt, ...);  #ifdef CONFIG_ATH10K_DEBUGFS +int ath10k_debug_start(struct ath10k *ar); +void ath10k_debug_stop(struct ath10k *ar);  int ath10k_debug_create(struct ath10k *ar); +void ath10k_debug_destroy(struct ath10k *ar);  void ath10k_debug_read_service_map(struct ath10k *ar,  				   void *service_map,  				   size_t map_size);  void ath10k_debug_read_target_stats(struct ath10k *ar,  				    struct wmi_stats_event *ev); +#define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++) +  #else +static inline int ath10k_debug_start(struct ath10k *ar) +{ +	return 0; +} + +static inline void ath10k_debug_stop(struct ath10k *ar) +{ +} +  static 
inline int ath10k_debug_create(struct ath10k *ar)  {  	return 0;  } +static inline void ath10k_debug_destroy(struct ath10k *ar) +{ +} +  static inline void ath10k_debug_read_service_map(struct ath10k *ar,  						 void *service_map,  						 size_t map_size) @@ -65,11 +85,14 @@ static inline void ath10k_debug_read_target_stats(struct ath10k *ar,  						  struct wmi_stats_event *ev)  {  } + +#define ATH10K_DFS_STAT_INC(ar, c) do { } while (0) +  #endif /* CONFIG_ATH10K_DEBUGFS */  #ifdef CONFIG_ATH10K_DEBUG -extern __printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask, -				      const char *fmt, ...); +__printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask, +			       const char *fmt, ...);  void ath10k_dbg_dump(enum ath10k_debug_mask mask,  		     const char *msg, const char *prefix,  		     const void *buf, size_t len); diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h index dcdea68bcc0..2ac7beacddc 100644 --- a/drivers/net/wireless/ath/ath10k/hif.h +++ b/drivers/net/wireless/ath/ath10k/hif.h @@ -21,6 +21,14 @@  #include <linux/kernel.h>  #include "core.h" +struct ath10k_hif_sg_item { +	u16 transfer_id; +	void *transfer_context; /* NULL = tx completion callback not called */ +	void *vaddr; /* for debugging mostly */ +	u32 paddr; +	u16 len; +}; +  struct ath10k_hif_cb {  	int (*tx_completion)(struct ath10k *ar,  			     struct sk_buff *wbuf, @@ -31,11 +39,9 @@ struct ath10k_hif_cb {  };  struct ath10k_hif_ops { -	/* Send the head of a buffer to HIF for transmission to the target. */ -	int (*send_head)(struct ath10k *ar, u8 pipe_id, -			 unsigned int transfer_id, -			 unsigned int nbytes, -			 struct sk_buff *buf); +	/* send a scatter-gather list to the target */ +	int (*tx_sg)(struct ath10k *ar, u8 pipe_id, +		     struct ath10k_hif_sg_item *items, int n_items);  	/*  	 * API to handle HIF-specific BMI message exchanges, this API is @@ -86,12 +92,11 @@ struct ath10k_hif_ops {  }; -static inline int ath10k_hif_send_head(struct ath10k *ar, u8 pipe_id, -				       unsigned int transfer_id, -				       unsigned int nbytes, -				       struct sk_buff *buf) +static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id, +				   struct ath10k_hif_sg_item *items, +				   int n_items)  { -	return ar->hif.ops->send_head(ar, pipe_id, transfer_id, nbytes, buf); +	return ar->hif.ops->tx_sg(ar, pipe_id, items, n_items);  }  static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar, diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c index ef3329ef52f..e493db4b4a4 100644 --- a/drivers/net/wireless/ath/ath10k/htc.c +++ b/drivers/net/wireless/ath/ath10k/htc.c @@ -63,7 +63,9 @@ static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)  static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,  					     struct sk_buff *skb)  { -	ath10k_skb_unmap(htc->ar->dev, skb); +	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); + +	dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);  	skb_pull(skb, sizeof(struct ath10k_htc_hdr));  } @@ -103,10 +105,10 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,  	struct ath10k_htc_hdr *hdr;  	hdr = (struct ath10k_htc_hdr *)skb->data; -	memset(hdr, 0, sizeof(*hdr));  	hdr->eid = ep->eid;  	hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr)); +	hdr->flags = 0;  	spin_lock_bh(&ep->htc->tx_lock);  	hdr->seq_no = ep->seq_no++; @@ -117,134 +119,16 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,  	
spin_unlock_bh(&ep->htc->tx_lock);  } -static int ath10k_htc_issue_skb(struct ath10k_htc *htc, -				struct ath10k_htc_ep *ep, -				struct sk_buff *skb, -				u8 credits) -{ -	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); -	int ret; - -	ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__, -		   ep->eid, skb); - -	ath10k_htc_prepare_tx_skb(ep, skb); - -	ret = ath10k_skb_map(htc->ar->dev, skb); -	if (ret) -		goto err; - -	ret = ath10k_hif_send_head(htc->ar, -				   ep->ul_pipe_id, -				   ep->eid, -				   skb->len, -				   skb); -	if (unlikely(ret)) -		goto err; - -	return 0; -err: -	ath10k_warn("HTC issue failed: %d\n", ret); - -	spin_lock_bh(&htc->tx_lock); -	ep->tx_credits += credits; -	spin_unlock_bh(&htc->tx_lock); - -	/* this is the simplest way to handle out-of-resources for non-credit -	 * based endpoints. credit based endpoints can still get -ENOSR, but -	 * this is highly unlikely as credit reservation should prevent that */ -	if (ret == -ENOSR) { -		spin_lock_bh(&htc->tx_lock); -		__skb_queue_head(&ep->tx_queue, skb); -		spin_unlock_bh(&htc->tx_lock); - -		return ret; -	} - -	skb_cb->is_aborted = true; -	ath10k_htc_notify_tx_completion(ep, skb); - -	return ret; -} - -static struct sk_buff *ath10k_htc_get_skb_credit_based(struct ath10k_htc *htc, -						       struct ath10k_htc_ep *ep, -						       u8 *credits) -{ -	struct sk_buff *skb; -	struct ath10k_skb_cb *skb_cb; -	int credits_required; -	int remainder; -	unsigned int transfer_len; - -	lockdep_assert_held(&htc->tx_lock); - -	skb = __skb_dequeue(&ep->tx_queue); -	if (!skb) -		return NULL; - -	skb_cb = ATH10K_SKB_CB(skb); -	transfer_len = skb->len; - -	if (likely(transfer_len <= htc->target_credit_size)) { -		credits_required = 1; -	} else { -		/* figure out how many credits this message requires */ -		credits_required = transfer_len / htc->target_credit_size; -		remainder = transfer_len % htc->target_credit_size; - -		if (remainder) -			credits_required++; -	} - -	ath10k_dbg(ATH10K_DBG_HTC, "Credits required %d got %d\n", -		   credits_required, ep->tx_credits); - -	if (ep->tx_credits < credits_required) { -		__skb_queue_head(&ep->tx_queue, skb); -		return NULL; -	} - -	ep->tx_credits -= credits_required; -	*credits = credits_required; -	return skb; -} - -static void ath10k_htc_send_work(struct work_struct *work) -{ -	struct ath10k_htc_ep *ep = container_of(work, -					struct ath10k_htc_ep, send_work); -	struct ath10k_htc *htc = ep->htc; -	struct sk_buff *skb; -	u8 credits = 0; -	int ret; - -	while (true) { -		if (ep->ul_is_polled) -			ath10k_htc_send_complete_check(ep, 0); - -		spin_lock_bh(&htc->tx_lock); -		if (ep->tx_credit_flow_enabled) -			skb = ath10k_htc_get_skb_credit_based(htc, ep, -							      &credits); -		else -			skb = __skb_dequeue(&ep->tx_queue); -		spin_unlock_bh(&htc->tx_lock); - -		if (!skb) -			break; - -		ret = ath10k_htc_issue_skb(htc, ep, skb, credits); -		if (ret == -ENOSR) -			break; -	} -} -  int ath10k_htc_send(struct ath10k_htc *htc,  		    enum ath10k_htc_ep_id eid,  		    struct sk_buff *skb)  {  	struct ath10k_htc_ep *ep = &htc->endpoint[eid]; +	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); +	struct ath10k_hif_sg_item sg_item; +	struct device *dev = htc->ar->dev; +	int credits = 0; +	int ret;  	if (htc->ar->state == ATH10K_STATE_WEDGED)  		return -ECOMM; @@ -254,18 +138,67 @@ int ath10k_htc_send(struct ath10k_htc *htc,  		return -ENOENT;  	} +	/* FIXME: This looks ugly, can we fix it? 
*/  	spin_lock_bh(&htc->tx_lock);  	if (htc->stopped) {  		spin_unlock_bh(&htc->tx_lock);  		return -ESHUTDOWN;  	} +	spin_unlock_bh(&htc->tx_lock); -	__skb_queue_tail(&ep->tx_queue, skb);  	skb_push(skb, sizeof(struct ath10k_htc_hdr)); -	spin_unlock_bh(&htc->tx_lock); -	queue_work(htc->ar->workqueue, &ep->send_work); +	if (ep->tx_credit_flow_enabled) { +		credits = DIV_ROUND_UP(skb->len, htc->target_credit_size); +		spin_lock_bh(&htc->tx_lock); +		if (ep->tx_credits < credits) { +			spin_unlock_bh(&htc->tx_lock); +			ret = -EAGAIN; +			goto err_pull; +		} +		ep->tx_credits -= credits; +		ath10k_dbg(ATH10K_DBG_HTC, +			   "htc ep %d consumed %d credits (total %d)\n", +			   eid, credits, ep->tx_credits); +		spin_unlock_bh(&htc->tx_lock); +	} + +	ath10k_htc_prepare_tx_skb(ep, skb); + +	skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE); +	ret = dma_mapping_error(dev, skb_cb->paddr); +	if (ret) +		goto err_credits; + +	sg_item.transfer_id = ep->eid; +	sg_item.transfer_context = skb; +	sg_item.vaddr = skb->data; +	sg_item.paddr = skb_cb->paddr; +	sg_item.len = skb->len; + +	ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1); +	if (ret) +		goto err_unmap; +  	return 0; + +err_unmap: +	dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE); +err_credits: +	if (ep->tx_credit_flow_enabled) { +		spin_lock_bh(&htc->tx_lock); +		ep->tx_credits += credits; +		ath10k_dbg(ATH10K_DBG_HTC, +			   "htc ep %d reverted %d credits back (total %d)\n", +			   eid, credits, ep->tx_credits); +		spin_unlock_bh(&htc->tx_lock); + +		if (ep->ep_ops.ep_tx_credits) +			ep->ep_ops.ep_tx_credits(htc->ar); +	} +err_pull: +	skb_pull(skb, sizeof(struct ath10k_htc_hdr)); +	return ret;  }  static int ath10k_htc_tx_completion_handler(struct ath10k *ar, @@ -275,42 +208,15 @@ static int ath10k_htc_tx_completion_handler(struct ath10k *ar,  	struct ath10k_htc *htc = &ar->htc;  	struct ath10k_htc_ep *ep = &htc->endpoint[eid]; +	if (WARN_ON_ONCE(!skb)) +		return 0; +  	ath10k_htc_notify_tx_completion(ep, skb);  	/* the skb now belongs to the completion handler */ -	/* note: when using TX credit flow, the re-checking of queues happens -	 * when credits flow back from the target.  
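
The reworked ath10k_htc_send() above reserves credits with plain ceiling division, one credit per target_credit_size bytes rounded up, returns -EAGAIN when the pool runs short, and hands the same count back on the unwind path (invoking the ep_tx_credits callback so the caller can retry). A worked example of the arithmetic, with an illustrative credit size:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		int target_credit_size = 1792;	/* illustrative; announced by HTC READY */
		int lens[] = { 64, 1792, 1793, 3000 };
		unsigned i;

		for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
			printf("len %4d -> %d credit(s)\n", lens[i],
			       DIV_ROUND_UP(lens[i], target_credit_size));

		return 0;	/* prints 1, 1, 2 and 2 credits */
	}
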
in the non-TX credit case, -	 * we recheck after the packet completes */ -	spin_lock_bh(&htc->tx_lock); -	if (!ep->tx_credit_flow_enabled && !htc->stopped) -		queue_work(ar->workqueue, &ep->send_work); -	spin_unlock_bh(&htc->tx_lock); -  	return 0;  } -/* flush endpoint TX queue */ -static void ath10k_htc_flush_endpoint_tx(struct ath10k_htc *htc, -					 struct ath10k_htc_ep *ep) -{ -	struct sk_buff *skb; -	struct ath10k_skb_cb *skb_cb; - -	spin_lock_bh(&htc->tx_lock); -	for (;;) { -		skb = __skb_dequeue(&ep->tx_queue); -		if (!skb) -			break; - -		skb_cb = ATH10K_SKB_CB(skb); -		skb_cb->is_aborted = true; -		ath10k_htc_notify_tx_completion(ep, skb); -	} -	spin_unlock_bh(&htc->tx_lock); - -	cancel_work_sync(&ep->send_work); -} -  /***********/  /* Receive */  /***********/ @@ -334,14 +240,17 @@ ath10k_htc_process_credit_report(struct ath10k_htc *htc,  		if (report->eid >= ATH10K_HTC_EP_COUNT)  			break; -		ath10k_dbg(ATH10K_DBG_HTC, "ep %d got %d credits\n", -			   report->eid, report->credits); -  		ep = &htc->endpoint[report->eid];  		ep->tx_credits += report->credits; -		if (ep->tx_credits && !skb_queue_empty(&ep->tx_queue)) -			queue_work(htc->ar->workqueue, &ep->send_work); +		ath10k_dbg(ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n", +			   report->eid, report->credits, ep->tx_credits); + +		if (ep->ep_ops.ep_tx_credits) { +			spin_unlock_bh(&htc->tx_lock); +			ep->ep_ops.ep_tx_credits(htc->ar); +			spin_lock_bh(&htc->tx_lock); +		}  	}  	spin_unlock_bh(&htc->tx_lock);  } @@ -599,10 +508,8 @@ static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)  		ep->max_ep_message_len = 0;  		ep->max_tx_queue_depth = 0;  		ep->eid = i; -		skb_queue_head_init(&ep->tx_queue);  		ep->htc = htc;  		ep->tx_credit_flow_enabled = true; -		INIT_WORK(&ep->send_work, ath10k_htc_send_work);  	}  } @@ -647,14 +554,6 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)  	u16 credit_count;  	u16 credit_size; -	INIT_COMPLETION(htc->ctl_resp); - -	status = ath10k_hif_start(htc->ar); -	if (status) { -		ath10k_err("could not start HIF (%d)\n", status); -		goto err_start; -	} -  	status = wait_for_completion_timeout(&htc->ctl_resp,  					     ATH10K_HTC_WAIT_TIMEOUT_HZ);  	if (status <= 0) { @@ -662,15 +561,13 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)  			status = -ETIMEDOUT;  		ath10k_err("ctl_resp never came in (%d)\n", status); -		goto err_target; +		return status;  	}  	if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {  		ath10k_err("Invalid HTC ready msg len:%d\n",  			   htc->control_resp_len); - -		status = -ECOMM; -		goto err_target; +		return -ECOMM;  	}  	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer; @@ -680,8 +577,7 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)  	if (message_id != ATH10K_HTC_MSG_READY_ID) {  		ath10k_err("Invalid HTC ready msg: 0x%x\n", message_id); -		status = -ECOMM; -		goto err_target; +		return -ECOMM;  	}  	htc->total_transmit_credits = credit_count; @@ -694,9 +590,8 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)  	if ((htc->total_transmit_credits == 0) ||  	    (htc->target_credit_size == 0)) { -		status = -ECOMM;  		ath10k_err("Invalid credit size received\n"); -		goto err_target; +		return -ECOMM;  	}  	ath10k_htc_setup_target_buffer_assignments(htc); @@ -713,14 +608,10 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)  	status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);  	if (status) {  		ath10k_err("could not connect to htc service (%d)\n", status); -		goto err_target; +		return 
status;  	}  	return 0; -err_target: -	ath10k_hif_stop(htc->ar); -err_start: -	return status;  }  int ath10k_htc_connect_service(struct ath10k_htc *htc, @@ -752,8 +643,8 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,  	tx_alloc = ath10k_htc_get_credit_allocation(htc,  						    conn_req->service_id);  	if (!tx_alloc) -		ath10k_dbg(ATH10K_DBG_HTC, -			   "HTC Service %s does not allocate target credits\n", +		ath10k_dbg(ATH10K_DBG_BOOT, +			   "boot htc service %s does not allocate target credits\n",  			   htc_service_name(conn_req->service_id));  	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar); @@ -772,17 +663,17 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,  	flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC); -	req_msg = &msg->connect_service; -	req_msg->flags = __cpu_to_le16(flags); -	req_msg->service_id = __cpu_to_le16(conn_req->service_id); -  	/* Only enable credit flow control for WMI ctrl service */  	if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {  		flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;  		disable_credit_flow_ctrl = true;  	} -	INIT_COMPLETION(htc->ctl_resp); +	req_msg = &msg->connect_service; +	req_msg->flags = __cpu_to_le16(flags); +	req_msg->service_id = __cpu_to_le16(conn_req->service_id); + +	reinit_completion(&htc->ctl_resp);  	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);  	if (status) { @@ -873,19 +764,19 @@ setup:  	if (status)  		return status; -	ath10k_dbg(ATH10K_DBG_HTC, -		   "HTC service: %s UL pipe: %d DL pipe: %d eid: %d ready\n", +	ath10k_dbg(ATH10K_DBG_BOOT, +		   "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",  		   htc_service_name(ep->service_id), ep->ul_pipe_id,  		   ep->dl_pipe_id, ep->eid); -	ath10k_dbg(ATH10K_DBG_HTC, -		   "EP %d UL polled: %d, DL polled: %d\n", +	ath10k_dbg(ATH10K_DBG_BOOT, +		   "boot htc ep %d ul polled %d dl polled %d\n",  		   ep->eid, ep->ul_is_polled, ep->dl_is_polled);  	if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {  		ep->tx_credit_flow_enabled = false; -		ath10k_dbg(ATH10K_DBG_HTC, -			   "HTC service: %s eid: %d TX flow control disabled\n", +		ath10k_dbg(ATH10K_DBG_BOOT, +			   "boot htc service '%s' eid %d TX flow control disabled\n",  			   htc_service_name(ep->service_id), assigned_eid);  	} @@ -939,25 +830,11 @@ int ath10k_htc_start(struct ath10k_htc *htc)  	return 0;  } -/* - * stop HTC communications, i.e. 
stop interrupt reception, and flush all - * queued buffers - */  void ath10k_htc_stop(struct ath10k_htc *htc)  { -	int i; -	struct ath10k_htc_ep *ep; -  	spin_lock_bh(&htc->tx_lock);  	htc->stopped = true;  	spin_unlock_bh(&htc->tx_lock); - -	for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) { -		ep = &htc->endpoint[i]; -		ath10k_htc_flush_endpoint_tx(htc, ep); -	} - -	ath10k_hif_stop(htc->ar);  }  /* registered target arrival callback from the HIF layer */ diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h index e1dd8c76185..4716d331e6b 100644 --- a/drivers/net/wireless/ath/ath10k/htc.h +++ b/drivers/net/wireless/ath/ath10k/htc.h @@ -276,6 +276,7 @@ struct ath10k_htc_ops {  struct ath10k_htc_ep_ops {  	void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);  	void (*ep_rx_complete)(struct ath10k *, struct sk_buff *); +	void (*ep_tx_credits)(struct ath10k *);  };  /* service connection information */ @@ -315,15 +316,11 @@ struct ath10k_htc_ep {  	int ul_is_polled; /* call HIF to get tx completions */  	int dl_is_polled; /* call HIF to fetch rx (not implemented) */ -	struct sk_buff_head tx_queue; -  	u8 seq_no; /* for debugging */  	int tx_credits;  	int tx_credit_size;  	int tx_credits_per_max_message;  	bool tx_credit_flow_enabled; - -	struct work_struct send_work;  };  struct ath10k_htc_svc_tx_credits { diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c index 39342c5cfcb..19c12cc8d66 100644 --- a/drivers/net/wireless/ath/ath10k/htt.c +++ b/drivers/net/wireless/ath/ath10k/htt.c @@ -22,7 +22,7 @@  #include "core.h"  #include "debug.h" -static int ath10k_htt_htc_attach(struct ath10k_htt *htt) +int ath10k_htt_connect(struct ath10k_htt *htt)  {  	struct ath10k_htc_svc_conn_req conn_req;  	struct ath10k_htc_svc_conn_resp conn_resp; @@ -48,39 +48,14 @@ static int ath10k_htt_htc_attach(struct ath10k_htt *htt)  	return 0;  } -int ath10k_htt_attach(struct ath10k *ar) +int ath10k_htt_init(struct ath10k *ar)  {  	struct ath10k_htt *htt = &ar->htt; -	int ret;  	htt->ar = ar;  	htt->max_throughput_mbps = 800;  	/* -	 * Connect to HTC service. -	 * This has to be done before calling ath10k_htt_rx_attach, -	 * since ath10k_htt_rx_attach involves sending a rx ring configure -	 * message to the target. -	 */ -	ret = ath10k_htt_htc_attach(htt); -	if (ret) { -		ath10k_err("could not attach htt htc (%d)\n", ret); -		goto err_htc_attach; -	} - -	ret = ath10k_htt_tx_attach(htt); -	if (ret) { -		ath10k_err("could not attach htt tx (%d)\n", ret); -		goto err_htc_attach; -	} - -	ret = ath10k_htt_rx_attach(htt); -	if (ret) { -		ath10k_err("could not attach htt rx (%d)\n", ret); -		goto err_rx_attach; -	} - -	/*  	 * Prefetch enough data to satisfy target  	 * classification engine.  	 * This is for LL chips. 
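 	 * (LL/HL presumably stand for low-latency targets, e.g. PCIe, vs.
 	 * high-latency ones, e.g. SDIO/USB; that expansion is an editorial
 	 * assumption, not something this patch spells out.)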
HL chips will probably @@ -93,36 +68,26 @@ int ath10k_htt_attach(struct ath10k *ar)  		2; /* ip4 dscp or ip6 priority */  	return 0; - -err_rx_attach: -	ath10k_htt_tx_detach(htt); -err_htc_attach: -	return ret;  }  #define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ)  static int ath10k_htt_verify_version(struct ath10k_htt *htt)  { -	ath10k_dbg(ATH10K_DBG_HTT, -		   "htt target version %d.%d; host version %d.%d\n", -		    htt->target_version_major, -		    htt->target_version_minor, -		    HTT_CURRENT_VERSION_MAJOR, -		    HTT_CURRENT_VERSION_MINOR); - -	if (htt->target_version_major != HTT_CURRENT_VERSION_MAJOR) { -		ath10k_err("htt major versions are incompatible!\n"); +	ath10k_dbg(ATH10K_DBG_BOOT, "htt target version %d.%d\n", +		   htt->target_version_major, htt->target_version_minor); + +	if (htt->target_version_major != 2 && +	    htt->target_version_major != 3) { +		ath10k_err("unsupported htt major version %d. supported versions are 2 and 3\n", +			   htt->target_version_major);  		return -ENOTSUPP;  	} -	if (htt->target_version_minor != HTT_CURRENT_VERSION_MINOR) -		ath10k_warn("htt minor version differ but still compatible\n"); -  	return 0;  } -int ath10k_htt_attach_target(struct ath10k_htt *htt) +int ath10k_htt_setup(struct ath10k_htt *htt)  {  	int status; @@ -145,9 +110,3 @@ int ath10k_htt_attach_target(struct ath10k_htt *htt)  	return ath10k_htt_send_rx_ring_cfg_ll(htt);  } - -void ath10k_htt_detach(struct ath10k_htt *htt) -{ -	ath10k_htt_rx_detach(htt); -	ath10k_htt_tx_detach(htt); -} diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 318be4629cd..9a263462c79 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -19,13 +19,13 @@  #define _HTT_H_  #include <linux/bug.h> +#include <linux/interrupt.h> +#include <linux/dmapool.h> +#include <net/mac80211.h>  #include "htc.h"  #include "rx_desc.h" -#define HTT_CURRENT_VERSION_MAJOR	2 -#define HTT_CURRENT_VERSION_MINOR	1 -  enum htt_dbg_stats_type {  	HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,  	HTT_DBG_STATS_RX_REORDER    = 1 << 1, @@ -45,6 +45,9 @@ enum htt_h2t_msg_type { /* host-to-target */  	HTT_H2T_MSG_TYPE_SYNC               = 4,  	HTT_H2T_MSG_TYPE_AGGR_CFG           = 5,  	HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6, + +	/* This command is used for sending management frames in HTT < 3.0. +	 * HTT >= 3.0 uses TX_FRM for everything. 
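+	 *
+	 * An editorial, illustrative sketch of the dispatch this implies on
+	 * the host side (the real branch lives in the driver tx path; "hdr"
+	 * is assumed to point at the frame's 802.11 header):
+	 *
+	 *	if (htt->target_version_major < 3 &&
+	 *	    ieee80211_is_mgmt(hdr->frame_control))
+	 *		ret = ath10k_htt_mgmt_tx(htt, skb);
+	 *	else
+	 *		ret = ath10k_htt_tx(htt, skb);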
*/  	HTT_H2T_MSG_TYPE_MGMT_TX            = 7,  	HTT_H2T_NUM_MSGS /* keep this last */ @@ -1170,18 +1173,12 @@ struct htt_peer_unmap_event {  	u16 peer_id;  }; -struct htt_rx_info { -	struct sk_buff *skb; -	enum htt_rx_mpdu_status status; -	enum htt_rx_mpdu_encrypt_type encrypt_type; -	s8 signal; -	struct { -		u8 info0; -		u32 info1; -		u32 info2; -	} rate; -	bool fcs_err; -}; +struct ath10k_htt_txbuf { +	struct htt_data_tx_desc_frag frags[2]; +	struct ath10k_htc_hdr htc_hdr; +	struct htt_cmd_hdr cmd_hdr; +	struct htt_data_tx_desc cmd_tx; +} __packed;  struct ath10k_htt {  	struct ath10k *ar; @@ -1264,10 +1261,21 @@ struct ath10k_htt {  	struct sk_buff **pending_tx;  	unsigned long *used_msdu_ids; /* bitmap */  	wait_queue_head_t empty_tx_wq; +	struct dma_pool *tx_pool;  	/* set if host-fw communication goes haywire  	 * used to avoid further failures */  	bool rx_confused; +	struct tasklet_struct rx_replenish_task; + +	/* This is used to group tx/rx completions separately and process them +	 * in batches to reduce cache stalls */ +	struct tasklet_struct txrx_compl_task; +	struct sk_buff_head tx_compl_q; +	struct sk_buff_head rx_compl_q; + +	/* rx_status template */ +	struct ieee80211_rx_status rx_status;  };  #define RX_HTT_HDR_STATUS_LEN 64 @@ -1308,6 +1316,10 @@ struct htt_rx_desc {  #define HTT_RX_BUF_SIZE 1920  #define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc)) +/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle + * aggregated traffic more nicely. */ +#define ATH10K_HTT_MAX_NUM_REFILL 16 +  /*   * DMA_MAP expects the buffer to be an integral number of cache lines.   * Rather than checking the actual cache line size, this code makes a @@ -1316,17 +1328,20 @@ struct htt_rx_desc {  #define HTT_LOG2_MAX_CACHE_LINE_SIZE 7	/* 2^7 = 128 */  #define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1) -int ath10k_htt_attach(struct ath10k *ar); -int ath10k_htt_attach_target(struct ath10k_htt *htt); -void ath10k_htt_detach(struct ath10k_htt *htt); +int ath10k_htt_connect(struct ath10k_htt *htt); +int ath10k_htt_init(struct ath10k *ar); +int ath10k_htt_setup(struct ath10k_htt *htt); + +int ath10k_htt_tx_alloc(struct ath10k_htt *htt); +void ath10k_htt_tx_free(struct ath10k_htt *htt); + +int ath10k_htt_rx_alloc(struct ath10k_htt *htt); +void ath10k_htt_rx_free(struct ath10k_htt *htt); -int ath10k_htt_tx_attach(struct ath10k_htt *htt); -void ath10k_htt_tx_detach(struct ath10k_htt *htt); -int ath10k_htt_rx_attach(struct ath10k_htt *htt); -void ath10k_htt_rx_detach(struct ath10k_htt *htt);  void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);  void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);  int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt); +int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);  int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);  void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt); @@ -1334,4 +1349,5 @@ int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt);  void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);  int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);  int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *); +  #endif diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index e784c40b904..eebc860c365 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -20,6 +20,7 @@  #include "htt.h"  
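 /* Editor's note: txrx.h provides ath10k_txrx_tx_unref(), which the batched
  * tx-completion path added below depends on, while the new trace.h include
  * backs the trace_ath10k_htt_stats() tracepoint call further down. */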
#include "txrx.h"  #include "debug.h" +#include "trace.h"  #include <linux/log2.h> @@ -40,6 +41,10 @@  /* when under memory pressure rx ring refill may fail and needs a retry */  #define HTT_RX_RING_REFILL_RETRY_MS 50 + +static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb); +static void ath10k_htt_txrx_compl_task(unsigned long ptr); +  static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)  {  	int size; @@ -177,10 +182,27 @@ static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)  static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)  { -	int ret, num_to_fill; +	int ret, num_deficit, num_to_fill; +	/* Refilling the whole RX ring buffer proves to be a bad idea. The +	 * reason is RX may take up significant amount of CPU cycles and starve +	 * other tasks, e.g. TX on an ethernet device while acting as a bridge +	 * with ath10k wlan interface. This ended up with very poor performance +	 * once CPU the host system was overwhelmed with RX on ath10k. +	 * +	 * By limiting the number of refills the replenishing occurs +	 * progressively. This in turns makes use of the fact tasklets are +	 * processed in FIFO order. This means actual RX processing can starve +	 * out refilling. If there's not enough buffers on RX ring FW will not +	 * report RX until it is refilled with enough buffers. This +	 * automatically balances load wrt to CPU power. +	 * +	 * This probably comes at a cost of lower maximum throughput but +	 * improves the avarage and stability. */  	spin_lock_bh(&htt->rx_ring.lock); -	num_to_fill = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt; +	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt; +	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit); +	num_deficit -= num_to_fill;  	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);  	if (ret == -ENOMEM) {  		/* @@ -191,6 +213,8 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)  		 */  		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +  			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS)); +	} else if (num_deficit > 0) { +		tasklet_schedule(&htt->rx_replenish_task);  	}  	spin_unlock_bh(&htt->rx_ring.lock);  } @@ -201,30 +225,34 @@ static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)  	ath10k_htt_rx_msdu_buff_replenish(htt);  } -static unsigned ath10k_htt_rx_ring_elems(struct ath10k_htt *htt) +static void ath10k_htt_rx_ring_clean_up(struct ath10k_htt *htt)  { -	return (__le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr) - -		htt->rx_ring.sw_rd_idx.msdu_payld) & htt->rx_ring.size_mask; +	struct sk_buff *skb; +	int i; + +	for (i = 0; i < htt->rx_ring.size; i++) { +		skb = htt->rx_ring.netbufs_ring[i]; +		if (!skb) +			continue; + +		dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(skb)->paddr, +				 skb->len + skb_tailroom(skb), +				 DMA_FROM_DEVICE); +		dev_kfree_skb_any(skb); +		htt->rx_ring.netbufs_ring[i] = NULL; +	}  } -void ath10k_htt_rx_detach(struct ath10k_htt *htt) +void ath10k_htt_rx_free(struct ath10k_htt *htt)  { -	int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld; -  	del_timer_sync(&htt->rx_ring.refill_retry_timer); +	tasklet_kill(&htt->rx_replenish_task); +	tasklet_kill(&htt->txrx_compl_task); -	while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) { -		struct sk_buff *skb = -				htt->rx_ring.netbufs_ring[sw_rd_idx]; -		struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); +	skb_queue_purge(&htt->tx_compl_q); +	skb_queue_purge(&htt->rx_compl_q); -		dma_unmap_single(htt->ar->dev, cb->paddr, -				 skb->len + skb_tailroom(skb), -				 
DMA_FROM_DEVICE); -		dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]); -		sw_rd_idx++; -		sw_rd_idx &= htt->rx_ring.size_mask; -	} +	ath10k_htt_rx_ring_clean_up(htt);  	dma_free_coherent(htt->ar->dev,  			  (htt->rx_ring.size * @@ -245,20 +273,22 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)  	int idx;  	struct sk_buff *msdu; -	spin_lock_bh(&htt->rx_ring.lock); +	lockdep_assert_held(&htt->rx_ring.lock); -	if (ath10k_htt_rx_ring_elems(htt) == 0) -		ath10k_warn("htt rx ring is empty!\n"); +	if (htt->rx_ring.fill_cnt == 0) { +		ath10k_warn("tried to pop sk_buff from an empty rx ring\n"); +		return NULL; +	}  	idx = htt->rx_ring.sw_rd_idx.msdu_payld;  	msdu = htt->rx_ring.netbufs_ring[idx]; +	htt->rx_ring.netbufs_ring[idx] = NULL;  	idx++;  	idx &= htt->rx_ring.size_mask;  	htt->rx_ring.sw_rd_idx.msdu_payld = idx;  	htt->rx_ring.fill_cnt--; -	spin_unlock_bh(&htt->rx_ring.lock);  	return msdu;  } @@ -273,6 +303,7 @@ static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)  	}  } +/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */  static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,  				   u8 **fw_desc, int *fw_desc_len,  				   struct sk_buff **head_msdu, @@ -282,12 +313,11 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,  	struct sk_buff *msdu;  	struct htt_rx_desc *rx_desc; -	if (ath10k_htt_rx_ring_elems(htt) == 0) -		ath10k_warn("htt rx ring is empty!\n"); +	lockdep_assert_held(&htt->rx_ring.lock);  	if (htt->rx_confused) {  		ath10k_warn("htt is confused. refusing rx\n"); -		return 0; +		return -1;  	}  	msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt); @@ -299,7 +329,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,  				 msdu->len + skb_tailroom(msdu),  				 DMA_FROM_DEVICE); -		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ", +		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ",  				msdu->data, msdu->len + skb_tailroom(msdu));  		rx_desc = (struct htt_rx_desc *)msdu->data; @@ -392,8 +422,8 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,  					 next->len + skb_tailroom(next),  					 DMA_FROM_DEVICE); -			ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ", -					next->data, +			ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, +					"htt rx chained: ", next->data,  					next->len + skb_tailroom(next));  			skb_trim(next, 0); @@ -405,12 +435,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,  			msdu_chaining = 1;  		} -		if (msdu_len > 0) { -			/* This may suggest FW bug? */ -			ath10k_warn("htt rx msdu len not consumed (%d)\n", -				    msdu_len); -		} -  		last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &  				RX_MSDU_END_INFO0_LAST_MSDU; @@ -425,6 +449,9 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,  	}  	*tail_msdu = msdu; +	if (*head_msdu == NULL) +		msdu_chaining = -1; +  	/*  	 * Don't refill the ring yet.  	 
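 	 * (Popped buffers are only accounted for via fill_cnt at this point;
 	 * the actual refill now happens from the rx_replenish_task tasklet,
 	 * capped at ATH10K_HTT_MAX_NUM_REFILL buffers per round.)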
* @@ -441,7 +468,13 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
 	return msdu_chaining;
 }
-int ath10k_htt_rx_attach(struct ath10k_htt *htt)
+static void ath10k_htt_rx_replenish_task(unsigned long ptr)
+{
+	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
+	ath10k_htt_rx_msdu_buff_replenish(htt);
+}
+
+int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
 {
 	dma_addr_t paddr;
 	void *vaddr;
@@ -467,7 +500,7 @@ int ath10k_htt_rx_attach(struct ath10k_htt *htt)
 	htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);
 	htt->rx_ring.netbufs_ring =
-		kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
+		kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
 			GFP_KERNEL);
 	if (!htt->rx_ring.netbufs_ring)
 		goto err_netbuf;
@@ -501,7 +534,16 @@ int ath10k_htt_rx_attach(struct ath10k_htt *htt)
 	if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
 		goto err_fill_ring;
-	ath10k_dbg(ATH10K_DBG_HTT, "HTT RX ring size: %d, fill_level: %d\n",
+	tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
+		     (unsigned long)htt);
+
+	skb_queue_head_init(&htt->tx_compl_q);
+	skb_queue_head_init(&htt->rx_compl_q);
+
+	tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
+		     (unsigned long)htt);
+
+	ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
 		   htt->rx_ring.size, htt->rx_ring.fill_level);
 	return 0;
@@ -590,138 +632,342 @@ static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
 	return false;
 }
-static int ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
-			struct htt_rx_info *info)
+struct rfc1042_hdr {
+	u8 llc_dsap;
+	u8 llc_ssap;
+	u8 llc_ctrl;
+	u8 snap_oui[3];
+	__be16 snap_type;
+} __packed;
+
+struct amsdu_subframe_hdr {
+	u8 dst[ETH_ALEN];
+	u8 src[ETH_ALEN];
+	__be16 len;
+} __packed;
+
+static const u8 rx_legacy_rate_idx[] = {
+	3,	/* 0x00  - 11Mbps  */
+	2,	/* 0x01  - 5.5Mbps */
+	1,	/* 0x02  - 2Mbps   */
+	0,	/* 0x03  - 1Mbps   */
+	3,	/* 0x04  - 11Mbps  */
+	2,	/* 0x05  - 5.5Mbps */
+	1,	/* 0x06  - 2Mbps   */
+	0,	/* 0x07  - 1Mbps   */
+	10,	/* 0x08  - 48Mbps  */
+	8,	/* 0x09  - 24Mbps  */
+	6,	/* 0x0A  - 12Mbps  */
+	4,	/* 0x0B  - 6Mbps   */
+	11,	/* 0x0C  - 54Mbps  */
+	9,	/* 0x0D  - 36Mbps  */
+	7,	/* 0x0E  - 18Mbps  */
+	5,	/* 0x0F  - 9Mbps   */
+};
+
+static void ath10k_htt_rx_h_rates(struct ath10k *ar,
+				  enum ieee80211_band band,
+				  u8 info0, u32 info1, u32 info2,
+				  struct ieee80211_rx_status *status)
+{
+	u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
+	u8 preamble = 0;
+
+	/* Check if valid fields */
+	if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
+		return;
+
+	preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);
+
+	switch (preamble) {
+	case HTT_RX_LEGACY:
+		cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
+		rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
+		rate_idx = 0;
+
+		if (rate < 0x08 || rate > 0x0F)
+			break;
+
+		switch (band) {
+		case IEEE80211_BAND_2GHZ:
+			if (cck)
+				rate &= ~BIT(3);
+			rate_idx = rx_legacy_rate_idx[rate];
+			break;
+		case IEEE80211_BAND_5GHZ:
+			rate_idx = rx_legacy_rate_idx[rate];
+			/* We are using the same rate table as the one
+			   registered with HW - ath10k_rates[]. 
In the 5GHz case skip the
			   CCK rates, hence the -4 here */
+			rate_idx -= 4;
+			break;
+		default:
+			break;
+		}
+
+		status->rate_idx = rate_idx;
+		break;
+	case HTT_RX_HT:
+	case HTT_RX_HT_WITH_TXBF:
+		/* HT-SIG - Table 20-11 in info1 and info2 */
+		mcs = info1 & 0x1F;
+		nss = mcs >> 3;
+		bw = (info1 >> 7) & 1;
+		sgi = (info2 >> 7) & 1;
+
+		status->rate_idx = mcs;
+		status->flag |= RX_FLAG_HT;
+		if (sgi)
+			status->flag |= RX_FLAG_SHORT_GI;
+		if (bw)
+			status->flag |= RX_FLAG_40MHZ;
+		break;
+	case HTT_RX_VHT:
+	case HTT_RX_VHT_WITH_TXBF:
+		/* VHT-SIG-A1 in info1, VHT-SIG-A2 in info2
+		   TODO check this */
+		mcs = (info2 >> 4) & 0x0F;
+		nss = ((info1 >> 10) & 0x07) + 1;
+		bw = info1 & 3;
+		sgi = info2 & 1;
+
+		status->rate_idx = mcs;
+		status->vht_nss = nss;
+
+		if (sgi)
+			status->flag |= RX_FLAG_SHORT_GI;
+
+		switch (bw) {
+		/* 20MHZ */
+		case 0:
+			break;
+		/* 40MHZ */
+		case 1:
+			status->flag |= RX_FLAG_40MHZ;
+			break;
+		/* 80MHZ */
+		case 2:
+			status->vht_flag |= RX_VHT_FLAG_80MHZ;
+		}
+
+		status->flag |= RX_FLAG_VHT;
+		break;
+	default:
+		break;
+	}
+}
+
+static void ath10k_htt_rx_h_protected(struct ath10k_htt *htt,
+				      struct ieee80211_rx_status *rx_status,
+				      struct sk_buff *skb,
+				      enum htt_rx_mpdu_encrypt_type enctype,
+				      enum rx_msdu_decap_format fmt,
+				      bool dot11frag)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+	rx_status->flag &= ~(RX_FLAG_DECRYPTED |
+			     RX_FLAG_IV_STRIPPED |
+			     RX_FLAG_MMIC_STRIPPED);
+
+	if (enctype == HTT_RX_MPDU_ENCRYPT_NONE)
+		return;
+
+	/*
+	 * There's no explicit rx descriptor flag to indicate whether a given
+	 * frame has been decrypted or not. We're forced to use the decap
+	 * format as an implicit indication. However, fragmented rx is always
+	 * raw and it probably never reports undecrypted raw frames.
+	 *
+	 * This makes sure sniffed frames are reported as-is without stripping
+	 * the protected flag.
+	 */
+	if (fmt == RX_MSDU_DECAP_RAW && !dot11frag)
+		return;
+
+	rx_status->flag |= RX_FLAG_DECRYPTED |
+			   RX_FLAG_IV_STRIPPED |
+			   RX_FLAG_MMIC_STRIPPED;
+	hdr->frame_control = __cpu_to_le16(__le16_to_cpu(hdr->frame_control) &
+					   ~IEEE80211_FCTL_PROTECTED);
+}
+
+static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
+				    struct ieee80211_rx_status *status)
+{
+	struct ieee80211_channel *ch;
+
+	spin_lock_bh(&ar->data_lock);
+	ch = ar->scan_channel;
+	if (!ch)
+		ch = ar->rx_channel;
+	spin_unlock_bh(&ar->data_lock);
+
+	if (!ch)
+		return false;
+
+	status->band = ch->band;
+	status->freq = ch->center_freq;
+
+	return true;
+}
+
+static void ath10k_process_rx(struct ath10k *ar,
+			      struct ieee80211_rx_status *rx_status,
+			      struct sk_buff *skb)
+{
+	struct ieee80211_rx_status *status;
+
+	status = IEEE80211_SKB_RXCB(skb);
+	*status = *rx_status;
+
+	ath10k_dbg(ATH10K_DBG_DATA,
+		   "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i\n",
+		   skb,
+		   skb->len,
+		   status->flag == 0 ? "legacy" : "",
+		   status->flag & RX_FLAG_HT ? "ht" : "",
+		   status->flag & RX_FLAG_VHT ? "vht" : "",
+		   status->flag & RX_FLAG_40MHZ ? "40" : "",
+		   status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
+		   status->flag & RX_FLAG_SHORT_GI ? 
"sgi " : "", +		   status->rate_idx, +		   status->vht_nss, +		   status->freq, +		   status->band, status->flag, +		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC), +		   !!(status->flag & RX_FLAG_MMIC_ERROR)); +	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ", +			skb->data, skb->len); + +	ieee80211_rx(ar->hw, skb); +} + +static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr) +{ +	/* nwifi header is padded to 4 bytes. this fixes 4addr rx */ +	return round_up(ieee80211_hdrlen(hdr->frame_control), 4); +} + +static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt, +				struct ieee80211_rx_status *rx_status, +				struct sk_buff *skb_in)  {  	struct htt_rx_desc *rxd; -	struct sk_buff *amsdu; +	struct sk_buff *skb = skb_in;  	struct sk_buff *first; -	struct ieee80211_hdr *hdr; -	struct sk_buff *skb = info->skb;  	enum rx_msdu_decap_format fmt;  	enum htt_rx_mpdu_encrypt_type enctype; +	struct ieee80211_hdr *hdr; +	u8 hdr_buf[64], addr[ETH_ALEN], *qos;  	unsigned int hdr_len; -	int crypto_len;  	rxd = (void *)skb->data - sizeof(*rxd); -	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1), -			RX_MSDU_START_INFO1_DECAP_FORMAT);  	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),  			RX_MPDU_START_INFO0_ENCRYPT_TYPE); -	/* FIXME: No idea what assumptions are safe here. Need logs */ -	if ((fmt == RX_MSDU_DECAP_RAW && skb->next) || -	    (fmt == RX_MSDU_DECAP_8023_SNAP_LLC)) { -		ath10k_htt_rx_free_msdu_chain(skb->next); -		skb->next = NULL; -		return -ENOTSUPP; -	} - -	/* A-MSDU max is a little less than 8K */ -	amsdu = dev_alloc_skb(8*1024); -	if (!amsdu) { -		ath10k_warn("A-MSDU allocation failed\n"); -		ath10k_htt_rx_free_msdu_chain(skb->next); -		skb->next = NULL; -		return -ENOMEM; -	} - -	if (fmt >= RX_MSDU_DECAP_NATIVE_WIFI) { -		int hdrlen; - -		hdr = (void *)rxd->rx_hdr_status; -		hdrlen = ieee80211_hdrlen(hdr->frame_control); -		memcpy(skb_put(amsdu, hdrlen), hdr, hdrlen); -	} +	hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status; +	hdr_len = ieee80211_hdrlen(hdr->frame_control); +	memcpy(hdr_buf, hdr, hdr_len); +	hdr = (struct ieee80211_hdr *)hdr_buf;  	first = skb;  	while (skb) {  		void *decap_hdr; -		int decap_len = 0; +		int len;  		rxd = (void *)skb->data - sizeof(*rxd);  		fmt = MS(__le32_to_cpu(rxd->msdu_start.info1), -				RX_MSDU_START_INFO1_DECAP_FORMAT); +			 RX_MSDU_START_INFO1_DECAP_FORMAT);  		decap_hdr = (void *)rxd->rx_hdr_status; -		if (skb == first) { -			/* We receive linked A-MSDU subframe skbuffs. The -			 * first one contains the original 802.11 header (and -			 * possible crypto param) in the RX descriptor. The -			 * A-MSDU subframe header follows that. Each part is -			 * aligned to 4 byte boundary. */ - -			hdr = (void *)amsdu->data; -			hdr_len = ieee80211_hdrlen(hdr->frame_control); -			crypto_len = ath10k_htt_rx_crypto_param_len(enctype); - -			decap_hdr += roundup(hdr_len, 4); -			decap_hdr += roundup(crypto_len, 4); -		} - -		if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) { -			/* Ethernet2 decap inserts ethernet header in place of -			 * A-MSDU subframe header. */ -			skb_pull(skb, 6 + 6 + 2); +		skb->ip_summed = ath10k_htt_rx_get_csum_state(skb); -			/* A-MSDU subframe header length */ -			decap_len += 6 + 6 + 2; - -			/* Ethernet2 decap also strips the LLC/SNAP so we need -			 * to re-insert it. The LLC/SNAP follows A-MSDU -			 * subframe header. */ -			/* FIXME: Not all LLCs are 8 bytes long */ -			decap_len += 8; - -			memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len); +		/* First frame in an A-MSDU chain has more decapped data. 
*/
+		if (skb == first) {
+			len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
+			len += round_up(ath10k_htt_rx_crypto_param_len(enctype),
+					4);
+			decap_hdr += len;
 		}
-		if (fmt == RX_MSDU_DECAP_NATIVE_WIFI) {
-			/* Native Wifi decap inserts regular 802.11 header
-			 * in place of A-MSDU subframe header. */
+		switch (fmt) {
+		case RX_MSDU_DECAP_RAW:
+			/* remove trailing FCS */
+			skb_trim(skb, skb->len - FCS_LEN);
+			break;
+		case RX_MSDU_DECAP_NATIVE_WIFI:
+			/* pull decapped header and copy DA */
 			hdr = (struct ieee80211_hdr *)skb->data;
-			skb_pull(skb, ieee80211_hdrlen(hdr->frame_control));
+			hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
+			memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
+			skb_pull(skb, hdr_len);
-			/* A-MSDU subframe header length */
-			decap_len += 6 + 6 + 2;
+			/* push original 802.11 header */
+			hdr = (struct ieee80211_hdr *)hdr_buf;
+			hdr_len = ieee80211_hdrlen(hdr->frame_control);
+			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
-			memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
-		}
+			/* original A-MSDU header has the bit set but we're
+			 * not including A-MSDU subframe header */
+			hdr = (struct ieee80211_hdr *)skb->data;
+			qos = ieee80211_get_qos_ctl(hdr);
+			qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
-		if (fmt == RX_MSDU_DECAP_RAW)
-			skb_trim(skb, skb->len - 4); /* remove FCS */
+			/* original 802.11 header has a different DA */
+			memcpy(ieee80211_get_DA(hdr), addr, ETH_ALEN);
+			break;
+		case RX_MSDU_DECAP_ETHERNET2_DIX:
+			/* strip ethernet header and insert decapped 802.11
+			 * header, amsdu subframe header and rfc1042 header */
-		memcpy(skb_put(amsdu, skb->len), skb->data, skb->len);
+			len = 0;
+			len += sizeof(struct rfc1042_hdr);
+			len += sizeof(struct amsdu_subframe_hdr);
-		/* A-MSDU subframes are padded to 4bytes
-		 * but relative to first subframe, not the whole MPDU */
-		if (skb->next && ((decap_len + skb->len) & 3)) {
-			int padlen = 4 - ((decap_len + skb->len) & 3);
-			memset(skb_put(amsdu, padlen), 0, padlen);
+			skb_pull(skb, sizeof(struct ethhdr));
+			memcpy(skb_push(skb, len), decap_hdr, len);
+			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+			break;
+		case RX_MSDU_DECAP_8023_SNAP_LLC:
+			/* insert decapped 802.11 header making a single
+			 * A-MSDU */
+			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+			break;
 		}
+		skb_in = skb;
+		ath10k_htt_rx_h_protected(htt, rx_status, skb_in, enctype, fmt,
+					  false);
 		skb = skb->next;
-	}
+		skb_in->next = NULL;
-	info->skb = amsdu;
-	info->encrypt_type = enctype;
+		if (skb)
+			rx_status->flag |= RX_FLAG_AMSDU_MORE;
+		else
+			rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
-	ath10k_htt_rx_free_msdu_chain(first);
+		ath10k_process_rx(htt->ar, rx_status, skb_in);
+	}
-	return 0;
+	/* FIXME: It might be nice to re-assemble the A-MSDU when there's a
+	 * monitor interface active for sniffing purposes. */
 }
-static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
+static void ath10k_htt_rx_msdu(struct ath10k_htt *htt,
+			       struct ieee80211_rx_status *rx_status,
+			       struct sk_buff *skb)
 {
-	struct sk_buff *skb = info->skb;
 	struct htt_rx_desc *rxd;
 	struct ieee80211_hdr *hdr;
 	enum rx_msdu_decap_format fmt;
 	enum htt_rx_mpdu_encrypt_type enctype;
+	int hdr_len;
+	void *rfc1042;
 	/* This shouldn't happen. If it does then it may be a FW bug. 
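 	 * (A chained sk_buff here would mean the target split a single MSDU
 	 * across several rx buffers -- presumably a frame larger than
 	 * HTT_RX_BUF_SIZE -- without marking it as an A-MSDU.)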
*/  	if (skb->next) { -		ath10k_warn("received chained non A-MSDU frame\n"); +		ath10k_warn("htt rx received chained non A-MSDU frame\n");  		ath10k_htt_rx_free_msdu_chain(skb->next);  		skb->next = NULL;  	} @@ -731,77 +977,52 @@ static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)  			RX_MSDU_START_INFO1_DECAP_FORMAT);  	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),  			RX_MPDU_START_INFO0_ENCRYPT_TYPE); -	hdr = (void *)skb->data - RX_HTT_HDR_STATUS_LEN; +	hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status; +	hdr_len = ieee80211_hdrlen(hdr->frame_control); + +	skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);  	switch (fmt) {  	case RX_MSDU_DECAP_RAW:  		/* remove trailing FCS */ -		skb_trim(skb, skb->len - 4); +		skb_trim(skb, skb->len - FCS_LEN);  		break;  	case RX_MSDU_DECAP_NATIVE_WIFI: -		/* nothing to do here */ +		/* Pull decapped header */ +		hdr = (struct ieee80211_hdr *)skb->data; +		hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr); +		skb_pull(skb, hdr_len); + +		/* Push original header */ +		hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status; +		hdr_len = ieee80211_hdrlen(hdr->frame_control); +		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);  		break;  	case RX_MSDU_DECAP_ETHERNET2_DIX: -		/* macaddr[6] + macaddr[6] + ethertype[2] */ -		skb_pull(skb, 6 + 6 + 2); -		break; -	case RX_MSDU_DECAP_8023_SNAP_LLC: -		/* macaddr[6] + macaddr[6] + len[2] */ -		/* we don't need this for non-A-MSDU */ -		skb_pull(skb, 6 + 6 + 2); -		break; -	} +		/* strip ethernet header and insert decapped 802.11 header and +		 * rfc1042 header */ -	if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) { -		void *llc; -		int llclen; +		rfc1042 = hdr; +		rfc1042 += roundup(hdr_len, 4); +		rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4); -		llclen = 8; -		llc  = hdr; -		llc += roundup(ieee80211_hdrlen(hdr->frame_control), 4); -		llc += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4); +		skb_pull(skb, sizeof(struct ethhdr)); +		memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)), +		       rfc1042, sizeof(struct rfc1042_hdr)); +		memcpy(skb_push(skb, hdr_len), hdr, hdr_len); +		break; +	case RX_MSDU_DECAP_8023_SNAP_LLC: +		/* remove A-MSDU subframe header and insert +		 * decapped 802.11 header. 
rfc1042 header is already there */
-		skb_push(skb, llclen);
-		memcpy(skb->data, llc, llclen);
+		skb_pull(skb, sizeof(struct amsdu_subframe_hdr));
+		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+		break;
 	}
-	if (fmt >= RX_MSDU_DECAP_ETHERNET2_DIX) {
-		int len = ieee80211_hdrlen(hdr->frame_control);
-		skb_push(skb, len);
-		memcpy(skb->data, hdr, len);
-	}
+	ath10k_htt_rx_h_protected(htt, rx_status, skb, enctype, fmt, false);
-	info->skb = skb;
-	info->encrypt_type = enctype;
-	return 0;
-}
-
-static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
-{
-	struct htt_rx_desc *rxd;
-	u32 flags;
-
-	rxd = (void *)skb->data - sizeof(*rxd);
-	flags = __le32_to_cpu(rxd->attention.flags);
-
-	if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR)
-		return true;
-
-	return false;
-}
-
-static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
-{
-	struct htt_rx_desc *rxd;
-	u32 flags;
-
-	rxd = (void *)skb->data - sizeof(*rxd);
-	flags = __le32_to_cpu(rxd->attention.flags);
-
-	if (flags & RX_ATTENTION_FLAGS_FCS_ERR)
-		return true;
-
-	return false;
+	ath10k_process_rx(htt->ar, rx_status, skb);
 }
 static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
@@ -835,20 +1056,123 @@ static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
 	return CHECKSUM_UNNECESSARY;
 }
+static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
+{
+	struct sk_buff *next = msdu_head->next;
+	struct sk_buff *to_free = next;
+	int space;
+	int total_len = 0;
+
+	/* TODO:  We might be able to optimize this by using
+	 * skb_try_coalesce or similar method to
+	 * decrease copying, or maybe get mac80211 to
+	 * provide a way to just receive a list of
+	 * skb?
+	 */
+
+	msdu_head->next = NULL;
+
+	/* Allocate total length all at once. */
+	while (next) {
+		total_len += next->len;
+		next = next->next;
+	}
+
+	space = total_len - skb_tailroom(msdu_head);
+	if ((space > 0) &&
+	    (pskb_expand_head(msdu_head, 0, space, GFP_ATOMIC) < 0)) {
+		/* TODO:  bump some rx-oom error stat */
+		/* put it back together so we can free the
+		 * whole list at once.
+		 */
+		msdu_head->next = to_free;
+		return -1;
+	}
+
+	/* Walk list again, copying contents into
+	 * msdu_head
+	 */
+	next = to_free;
+	while (next) {
+		skb_copy_from_linear_data(next, skb_put(msdu_head, next->len),
+					  next->len);
+		next = next->next;
+	}
+
+	/* If we get here, we have a consolidated skb.  Free the
+	 * fragments and pass the main skb on up the
+	 * stack. 
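+	 *
+	 * (Following the TODO above, a coalescing variant might try, per
+	 * fragment, something along these lines -- untested editorial
+	 * sketch:
+	 *
+	 *	bool stolen; int delta;
+	 *	if (skb_try_coalesce(msdu_head, next, &stolen, &delta))
+	 *		kfree_skb_partial(next, stolen);
+	 *
+	 * falling back to the copy loop above when coalescing fails.)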
+	 */ +	ath10k_htt_rx_free_msdu_chain(to_free); +	return 0; +} + +static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt, +					struct sk_buff *head, +					enum htt_rx_mpdu_status status, +					bool channel_set, +					u32 attention) +{ +	if (head->len == 0) { +		ath10k_dbg(ATH10K_DBG_HTT, +			   "htt rx dropping due to zero-len\n"); +		return false; +	} + +	if (attention & RX_ATTENTION_FLAGS_DECRYPT_ERR) { +		ath10k_dbg(ATH10K_DBG_HTT, +			   "htt rx dropping due to decrypt-err\n"); +		return false; +	} + +	if (!channel_set) { +		ath10k_warn("no channel configured; ignoring frame!\n"); +		return false; +	} + +	/* Skip mgmt frames while we handle this in WMI */ +	if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL || +	    attention & RX_ATTENTION_FLAGS_MGMT_TYPE) { +		ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n"); +		return false; +	} + +	if (status != HTT_RX_IND_MPDU_STATUS_OK && +	    status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR && +	    status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER && +	    !htt->ar->monitor_started) { +		ath10k_dbg(ATH10K_DBG_HTT, +			   "htt rx ignoring frame w/ status %d\n", +			   status); +		return false; +	} + +	if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) { +		ath10k_dbg(ATH10K_DBG_HTT, +			   "htt rx CAC running\n"); +		return false; +	} + +	return true; +} +  static void ath10k_htt_rx_handler(struct ath10k_htt *htt,  				  struct htt_rx_indication *rx)  { -	struct htt_rx_info info; +	struct ieee80211_rx_status *rx_status = &htt->rx_status;  	struct htt_rx_indication_mpdu_range *mpdu_ranges; +	struct htt_rx_desc *rxd; +	enum htt_rx_mpdu_status status;  	struct ieee80211_hdr *hdr;  	int num_mpdu_ranges; +	u32 attention;  	int fw_desc_len;  	u8 *fw_desc; +	bool channel_set;  	int i, j;  	int ret; -	int ip_summed; -	memset(&info, 0, sizeof(info)); +	lockdep_assert_held(&htt->rx_ring.lock);  	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);  	fw_desc = (u8 *)&rx->fw_desc; @@ -857,120 +1181,106 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,  			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);  	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx); +	/* Fill this once, while this is per-ppdu */ +	if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_START_VALID) { +		memset(rx_status, 0, sizeof(*rx_status)); +		rx_status->signal  = ATH10K_DEFAULT_NOISE_FLOOR + +				     rx->ppdu.combined_rssi; +	} + +	if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_END_VALID) { +		/* TSF available only in 32-bit */ +		rx_status->mactime = __le32_to_cpu(rx->ppdu.tsf) & 0xffffffff; +		rx_status->flag |= RX_FLAG_MACTIME_END; +	} + +	channel_set = ath10k_htt_rx_h_channel(htt->ar, rx_status); + +	if (channel_set) { +		ath10k_htt_rx_h_rates(htt->ar, rx_status->band, +				      rx->ppdu.info0, +				      __le32_to_cpu(rx->ppdu.info1), +				      __le32_to_cpu(rx->ppdu.info2), +				      rx_status); +	} +  	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",  			rx, sizeof(*rx) +  			(sizeof(struct htt_rx_indication_mpdu_range) *  				num_mpdu_ranges));  	for (i = 0; i < num_mpdu_ranges; i++) { -		info.status = mpdu_ranges[i].mpdu_range_status; +		status = mpdu_ranges[i].mpdu_range_status;  		for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {  			struct sk_buff *msdu_head, *msdu_tail; -			enum htt_rx_mpdu_status status; -			int msdu_chaining;  			msdu_head = NULL;  			msdu_tail = NULL; -			msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, -							 &fw_desc, -							 &fw_desc_len, -							 &msdu_head, -							 &msdu_tail); - -			if (!msdu_head) { -				ath10k_warn("htt rx no 
data!\n"); -				continue; -			} - -			if (msdu_head->len == 0) { -				ath10k_dbg(ATH10K_DBG_HTT, -					   "htt rx dropping due to zero-len\n"); +			ret = ath10k_htt_rx_amsdu_pop(htt, +						      &fw_desc, +						      &fw_desc_len, +						      &msdu_head, +						      &msdu_tail); + +			if (ret < 0) { +				ath10k_warn("failed to pop amsdu from htt rx ring %d\n", +					    ret);  				ath10k_htt_rx_free_msdu_chain(msdu_head);  				continue;  			} -			if (ath10k_htt_rx_has_decrypt_err(msdu_head)) { -				ath10k_htt_rx_free_msdu_chain(msdu_head); -				continue; -			} +			rxd = container_of((void *)msdu_head->data, +					   struct htt_rx_desc, +					   msdu_payload); +			attention = __le32_to_cpu(rxd->attention.flags); -			status = info.status; - -			/* Skip mgmt frames while we handle this in WMI */ -			if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL) { +			if (!ath10k_htt_rx_amsdu_allowed(htt, msdu_head, +							 status, +							 channel_set, +							 attention)) {  				ath10k_htt_rx_free_msdu_chain(msdu_head);  				continue;  			} -			if (status != HTT_RX_IND_MPDU_STATUS_OK && -			    status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR && -			    !htt->ar->monitor_enabled) { -				ath10k_dbg(ATH10K_DBG_HTT, -					   "htt rx ignoring frame w/ status %d\n", -					   status); +			if (ret > 0 && +			    ath10k_unchain_msdu(msdu_head) < 0) {  				ath10k_htt_rx_free_msdu_chain(msdu_head);  				continue;  			} -			/* FIXME: we do not support chaining yet. -			 * this needs investigation */ -			if (msdu_chaining) { -				ath10k_warn("msdu_chaining is true\n"); -				ath10k_htt_rx_free_msdu_chain(msdu_head); -				continue; -			} - -			/* The skb is not yet processed and it may be -			 * reallocated. Since the offload is in the original -			 * skb extract the checksum now and assign it later */ -			ip_summed = ath10k_htt_rx_get_csum_state(msdu_head); - -			info.skb     = msdu_head; -			info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head); -			info.signal  = ATH10K_DEFAULT_NOISE_FLOOR; -			info.signal += rx->ppdu.combined_rssi; +			if (attention & RX_ATTENTION_FLAGS_FCS_ERR) +				rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; +			else +				rx_status->flag &= ~RX_FLAG_FAILED_FCS_CRC; -			info.rate.info0 = rx->ppdu.info0; -			info.rate.info1 = __le32_to_cpu(rx->ppdu.info1); -			info.rate.info2 = __le32_to_cpu(rx->ppdu.info2); +			if (attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR) +				rx_status->flag |= RX_FLAG_MMIC_ERROR; +			else +				rx_status->flag &= ~RX_FLAG_MMIC_ERROR;  			hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);  			if (ath10k_htt_rx_hdr_is_amsdu(hdr)) -				ret = ath10k_htt_rx_amsdu(htt, &info); +				ath10k_htt_rx_amsdu(htt, rx_status, msdu_head);  			else -				ret = ath10k_htt_rx_msdu(htt, &info); - -			if (ret && !info.fcs_err) { -				ath10k_warn("error processing msdus %d\n", ret); -				dev_kfree_skb_any(info.skb); -				continue; -			} - -			if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data)) -				ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n"); - -			info.skb->ip_summed = ip_summed; - -			ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ", -					info.skb->data, info.skb->len); -			ath10k_process_rx(htt->ar, &info); +				ath10k_htt_rx_msdu(htt, rx_status, msdu_head);  		}  	} -	ath10k_htt_rx_msdu_buff_replenish(htt); +	tasklet_schedule(&htt->rx_replenish_task);  }  static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,  				struct htt_rx_fragment_indication *frag)  {  	struct sk_buff *msdu_head, *msdu_tail; +	enum htt_rx_mpdu_encrypt_type enctype;  	struct htt_rx_desc *rxd;  	enum 
rx_msdu_decap_format fmt;
-	struct htt_rx_info info = {};
+	struct ieee80211_rx_status *rx_status = &htt->rx_status;
 	struct ieee80211_hdr *hdr;
-	int msdu_chaining;
+	int ret;
 	bool tkip_mic_err;
 	bool decrypt_err;
 	u8 *fw_desc;
@@ -982,23 +1292,23 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
 	msdu_head = NULL;
 	msdu_tail = NULL;
-	msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
-						&msdu_head, &msdu_tail);
-	ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
+	spin_lock_bh(&htt->rx_ring.lock);
+	ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
+				      &msdu_head, &msdu_tail);
+	spin_unlock_bh(&htt->rx_ring.lock);
-	if (!msdu_head) {
-		ath10k_warn("htt rx frag no data\n");
-		return;
-	}
+	ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
-	if (msdu_chaining || msdu_head != msdu_tail) {
-		ath10k_warn("aggregation with fragmentation?!\n");
+	if (ret) {
+		ath10k_warn("failed to pop amsdu from htt rx ring for fragmented rx %d\n",
+			    ret);
 		ath10k_htt_rx_free_msdu_chain(msdu_head);
 		return;
 	}
 	/* FIXME: implement signal strength */
+	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
 	hdr = (struct ieee80211_hdr *)msdu_head->data;
 	rxd = (void *)msdu_head->data - sizeof(*rxd);
@@ -1015,57 +1325,55 @@
 		goto end;
 	}
-	info.skb = msdu_head;
-	info.status = HTT_RX_IND_MPDU_STATUS_OK;
-	info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
-				RX_MPDU_START_INFO0_ENCRYPT_TYPE);
-	info.skb->ip_summed = ath10k_htt_rx_get_csum_state(info.skb);
+	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+	ath10k_htt_rx_h_protected(htt, rx_status, msdu_head, enctype, fmt,
+				  true);
+	msdu_head->ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
-	if (tkip_mic_err) {
+	if (tkip_mic_err)
 		ath10k_warn("tkip mic error\n");
-		info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR;
-	}
 	if (decrypt_err) {
 		ath10k_warn("decryption err in fragmented rx\n");
-		dev_kfree_skb_any(info.skb);
+		dev_kfree_skb_any(msdu_head);
 		goto end;
 	}
-	if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
+	if (enctype != HTT_RX_MPDU_ENCRYPT_NONE) {
 		hdrlen = ieee80211_hdrlen(hdr->frame_control);
-		paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type);
+		paramlen = ath10k_htt_rx_crypto_param_len(enctype);
 		/* It is more efficient to move the header than the payload */
-		memmove((void *)info.skb->data + paramlen,
-			(void *)info.skb->data,
+		memmove((void *)msdu_head->data + paramlen,
+			(void *)msdu_head->data,
 			hdrlen);
-		skb_pull(info.skb, paramlen);
-		hdr = (struct ieee80211_hdr *)info.skb->data;
+		skb_pull(msdu_head, paramlen);
+		hdr = (struct ieee80211_hdr *)msdu_head->data;
 	}
 	/* remove trailing FCS */
 	trim  = 4;
 	/* remove crypto trailer */
-	trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type);
+	trim += ath10k_htt_rx_crypto_tail_len(enctype);
 	/* last fragment of TKIP frags has MIC */
 	if (!ieee80211_has_morefrags(hdr->frame_control) &&
-	    info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
+	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
 		trim += 8;
-	if (trim > info.skb->len) {
+	if (trim > msdu_head->len) {
 		ath10k_warn("htt rx fragment: trailer longer than the frame itself? 
drop\n"); -		dev_kfree_skb_any(info.skb); +		dev_kfree_skb_any(msdu_head);  		goto end;  	} -	skb_trim(info.skb, info.skb->len - trim); +	skb_trim(msdu_head, msdu_head->len - trim); -	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt frag mpdu: ", -			info.skb->data, info.skb->len); -	ath10k_process_rx(htt->ar, &info); +	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ", +			msdu_head->data, msdu_head->len); +	ath10k_process_rx(htt->ar, rx_status, msdu_head);  end:  	if (fw_desc_len > 0) { @@ -1075,6 +1383,45 @@ end:  	}  } +static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar, +				       struct sk_buff *skb) +{ +	struct ath10k_htt *htt = &ar->htt; +	struct htt_resp *resp = (struct htt_resp *)skb->data; +	struct htt_tx_done tx_done = {}; +	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS); +	__le16 msdu_id; +	int i; + +	lockdep_assert_held(&htt->tx_lock); + +	switch (status) { +	case HTT_DATA_TX_STATUS_NO_ACK: +		tx_done.no_ack = true; +		break; +	case HTT_DATA_TX_STATUS_OK: +		break; +	case HTT_DATA_TX_STATUS_DISCARD: +	case HTT_DATA_TX_STATUS_POSTPONE: +	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL: +		tx_done.discard = true; +		break; +	default: +		ath10k_warn("unhandled tx completion status %d\n", status); +		tx_done.discard = true; +		break; +	} + +	ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n", +		   resp->data_tx_completion.num_msdus); + +	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) { +		msdu_id = resp->data_tx_completion.msdus[i]; +		tx_done.msdu_id = __le16_to_cpu(msdu_id); +		ath10k_txrx_tx_unref(htt, &tx_done); +	} +} +  void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)  {  	struct ath10k_htt *htt = &ar->htt; @@ -1084,7 +1431,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)  	if (!IS_ALIGNED((unsigned long)skb->data, 4))  		ath10k_warn("unaligned htt message, expect trouble\n"); -	ath10k_dbg(ATH10K_DBG_HTT, "HTT RX, msg_type: 0x%0X\n", +	ath10k_dbg(ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",  		   resp->hdr.msg_type);  	switch (resp->hdr.msg_type) {  	case HTT_T2H_MSG_TYPE_VERSION_CONF: { @@ -1093,10 +1440,12 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)  		complete(&htt->target_version_received);  		break;  	} -	case HTT_T2H_MSG_TYPE_RX_IND: { -		ath10k_htt_rx_handler(htt, &resp->rx_ind); -		break; -	} +	case HTT_T2H_MSG_TYPE_RX_IND: +		spin_lock_bh(&htt->rx_ring.lock); +		__skb_queue_tail(&htt->rx_compl_q, skb); +		spin_unlock_bh(&htt->rx_ring.lock); +		tasklet_schedule(&htt->txrx_compl_task); +		return;  	case HTT_T2H_MSG_TYPE_PEER_MAP: {  		struct htt_peer_map_event ev = {  			.vdev_id = resp->peer_map.vdev_id, @@ -1131,44 +1480,17 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)  			break;  		} -		ath10k_txrx_tx_completed(htt, &tx_done); -		break; -	} -	case HTT_T2H_MSG_TYPE_TX_COMPL_IND: { -		struct htt_tx_done tx_done = {}; -		int status = MS(resp->data_tx_completion.flags, -				HTT_DATA_TX_STATUS); -		__le16 msdu_id; -		int i; - -		switch (status) { -		case HTT_DATA_TX_STATUS_NO_ACK: -			tx_done.no_ack = true; -			break; -		case HTT_DATA_TX_STATUS_OK: -			break; -		case HTT_DATA_TX_STATUS_DISCARD: -		case HTT_DATA_TX_STATUS_POSTPONE: -		case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL: -			tx_done.discard = true; -			break; -		default: -			ath10k_warn("unhandled tx completion status %d\n", -				    status); -			tx_done.discard = true; -			break; -		} - -		ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n", -			 
  resp->data_tx_completion.num_msdus); - -		for (i = 0; i < resp->data_tx_completion.num_msdus; i++) { -			msdu_id = resp->data_tx_completion.msdus[i]; -			tx_done.msdu_id = __le16_to_cpu(msdu_id); -			ath10k_txrx_tx_completed(htt, &tx_done); -		} +		spin_lock_bh(&htt->tx_lock); +		ath10k_txrx_tx_unref(htt, &tx_done); +		spin_unlock_bh(&htt->tx_lock);  		break;  	} +	case HTT_T2H_MSG_TYPE_TX_COMPL_IND: +		spin_lock_bh(&htt->tx_lock); +		__skb_queue_tail(&htt->tx_compl_q, skb); +		spin_unlock_bh(&htt->tx_lock); +		tasklet_schedule(&htt->txrx_compl_task); +		return;  	case HTT_T2H_MSG_TYPE_SEC_IND: {  		struct ath10k *ar = htt->ar;  		struct htt_security_indication *ev = &resp->security_indication; @@ -1190,8 +1512,10 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)  	case HTT_T2H_MSG_TYPE_TEST:  		/* FIX THIS */  		break; -	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:  	case HTT_T2H_MSG_TYPE_STATS_CONF: +		trace_ath10k_htt_stats(skb->data, skb->len); +		break; +	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:  	case HTT_T2H_MSG_TYPE_RX_ADDBA:  	case HTT_T2H_MSG_TYPE_RX_DELBA:  	case HTT_T2H_MSG_TYPE_RX_FLUSH: @@ -1206,3 +1530,25 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)  	/* Free the indication buffer */  	dev_kfree_skb_any(skb);  } + +static void ath10k_htt_txrx_compl_task(unsigned long ptr) +{ +	struct ath10k_htt *htt = (struct ath10k_htt *)ptr; +	struct htt_resp *resp; +	struct sk_buff *skb; + +	spin_lock_bh(&htt->tx_lock); +	while ((skb = __skb_dequeue(&htt->tx_compl_q))) { +		ath10k_htt_rx_frm_tx_compl(htt->ar, skb); +		dev_kfree_skb_any(skb); +	} +	spin_unlock_bh(&htt->tx_lock); + +	spin_lock_bh(&htt->rx_ring.lock); +	while ((skb = __skb_dequeue(&htt->rx_compl_q))) { +		resp = (struct htt_resp *)skb->data; +		ath10k_htt_rx_handler(htt, &resp->rx_ind); +		dev_kfree_skb_any(skb); +	} +	spin_unlock_bh(&htt->rx_ring.lock); +} diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 656c2546b29..7064354d1f4 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -83,20 +83,17 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)  	__clear_bit(msdu_id, htt->used_msdu_ids);  } -int ath10k_htt_tx_attach(struct ath10k_htt *htt) +int ath10k_htt_tx_alloc(struct ath10k_htt *htt)  { -	u8 pipe; -  	spin_lock_init(&htt->tx_lock);  	init_waitqueue_head(&htt->empty_tx_wq); -	/* At the beginning free queue number should hint us the maximum -	 * queue length */ -	pipe = htt->ar->htc.endpoint[htt->eid].ul_pipe_id; -	htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar, -								   pipe); +	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, htt->ar->fw_features)) +		htt->max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC; +	else +		htt->max_num_pending_tx = TARGET_NUM_MSDU_DESC; -	ath10k_dbg(ATH10K_DBG_HTT, "htt tx max num pending tx %d\n", +	ath10k_dbg(ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",  		   htt->max_num_pending_tx);  	htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) * @@ -112,66 +109,50 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)  		return -ENOMEM;  	} +	htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev, +				       sizeof(struct ath10k_htt_txbuf), 4, 0); +	if (!htt->tx_pool) { +		kfree(htt->used_msdu_ids); +		kfree(htt->pending_tx); +		return -ENOMEM; +	} +  	return 0;  } -static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt) +static void ath10k_htt_tx_free_pending(struct ath10k_htt *htt)  { -	
struct sk_buff *txdesc; +	struct htt_tx_done tx_done = {0};  	int msdu_id; -	/* No locks needed. Called after communication with the device has -	 * been stopped. */ - +	spin_lock_bh(&htt->tx_lock);  	for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {  		if (!test_bit(msdu_id, htt->used_msdu_ids))  			continue; -		txdesc = htt->pending_tx[msdu_id]; -		if (!txdesc) -			continue; -  		ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",  			   msdu_id); -		if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0) -			ATH10K_SKB_CB(txdesc)->htt.refcount = 1; +		tx_done.discard = 1; +		tx_done.msdu_id = msdu_id; -		ATH10K_SKB_CB(txdesc)->htt.discard = true; -		ath10k_txrx_tx_unref(htt, txdesc); +		ath10k_txrx_tx_unref(htt, &tx_done);  	} +	spin_unlock_bh(&htt->tx_lock);  } -void ath10k_htt_tx_detach(struct ath10k_htt *htt) +void ath10k_htt_tx_free(struct ath10k_htt *htt)  { -	ath10k_htt_tx_cleanup_pending(htt); +	ath10k_htt_tx_free_pending(htt);  	kfree(htt->pending_tx);  	kfree(htt->used_msdu_ids); +	dma_pool_destroy(htt->tx_pool);  	return;  }  void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)  { -	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); -	struct ath10k_htt *htt = &ar->htt; - -	if (skb_cb->htt.is_conf) { -		dev_kfree_skb_any(skb); -		return; -	} - -	if (skb_cb->is_aborted) { -		skb_cb->htt.discard = true; - -		/* if the skbuff is aborted we need to make sure we'll free up -		 * the tx resources, we can't simply run tx_unref() 2 times -		 * because if htt tx completion came in earlier we'd access -		 * unallocated memory */ -		if (skb_cb->htt.refcount > 1) -			skb_cb->htt.refcount = 1; -	} - -	ath10k_txrx_tx_unref(htt, skb); +	dev_kfree_skb_any(skb);  }  int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt) @@ -192,10 +173,48 @@ int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)  	cmd = (struct htt_cmd *)skb->data;  	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ; -	ATH10K_SKB_CB(skb)->htt.is_conf = true; +	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); +	if (ret) { +		dev_kfree_skb_any(skb); +		return ret; +	} + +	return 0; +} + +int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie) +{ +	struct htt_stats_req *req; +	struct sk_buff *skb; +	struct htt_cmd *cmd; +	int len = 0, ret; + +	len += sizeof(cmd->hdr); +	len += sizeof(cmd->stats_req); + +	skb = ath10k_htc_alloc_skb(len); +	if (!skb) +		return -ENOMEM; + +	skb_put(skb, len); +	cmd = (struct htt_cmd *)skb->data; +	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ; + +	req = &cmd->stats_req; + +	memset(req, 0, sizeof(*req)); + +	/* currently we support only max 8 bit masks so no need to worry +	 * about endian support */ +	req->upload_types[0] = mask; +	req->reset_types[0] = mask; +	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID; +	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff); +	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);  	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);  	if (ret) { +		ath10k_warn("failed to send htt type stats request: %d", ret);  		dev_kfree_skb_any(skb);  		return ret;  	} @@ -279,8 +298,6 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)  #undef desc_offset -	ATH10K_SKB_CB(skb)->htt.is_conf = true; -  	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);  	if (ret) {  		dev_kfree_skb_any(skb); @@ -293,10 +310,10 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)  int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)  {  	struct device *dev = htt->ar->dev; -	struct 
ath10k_skb_cb *skb_cb;  	struct sk_buff *txdesc = NULL;  	struct htt_cmd *cmd; -	u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id; +	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); +	u8 vdev_id = skb_cb->vdev_id;  	int len = 0;  	int msdu_id = -1;  	int res; @@ -304,30 +321,32 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)  	res = ath10k_htt_tx_inc_pending(htt);  	if (res) -		return res; +		goto err;  	len += sizeof(cmd->hdr);  	len += sizeof(cmd->mgmt_tx); -	txdesc = ath10k_htc_alloc_skb(len); -	if (!txdesc) { -		res = -ENOMEM; -		goto err; -	} -  	spin_lock_bh(&htt->tx_lock); -	msdu_id = ath10k_htt_tx_alloc_msdu_id(htt); -	if (msdu_id < 0) { +	res = ath10k_htt_tx_alloc_msdu_id(htt); +	if (res < 0) {  		spin_unlock_bh(&htt->tx_lock); -		res = msdu_id; -		goto err; +		goto err_tx_dec;  	} -	htt->pending_tx[msdu_id] = txdesc; +	msdu_id = res; +	htt->pending_tx[msdu_id] = msdu;  	spin_unlock_bh(&htt->tx_lock); -	res = ath10k_skb_map(dev, msdu); +	txdesc = ath10k_htc_alloc_skb(len); +	if (!txdesc) { +		res = -ENOMEM; +		goto err_free_msdu_id; +	} + +	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len, +				       DMA_TO_DEVICE); +	res = dma_mapping_error(dev, skb_cb->paddr);  	if (res) -		goto err; +		goto err_free_txdesc;  	skb_put(txdesc, len);  	cmd = (struct htt_cmd *)txdesc->data; @@ -339,174 +358,184 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)  	memcpy(cmd->mgmt_tx.hdr, msdu->data,  	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN)); -	/* refcount is decremented by HTC and HTT completions until it reaches -	 * zero and is freed */ -	skb_cb = ATH10K_SKB_CB(txdesc); -	skb_cb->htt.msdu_id = msdu_id; -	skb_cb->htt.refcount = 2; -	skb_cb->htt.msdu = msdu; +	skb_cb->htt.txbuf = NULL;  	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);  	if (res) -		goto err; +		goto err_unmap_msdu;  	return 0; -err: -	ath10k_skb_unmap(dev, msdu); - -	if (txdesc) -		dev_kfree_skb_any(txdesc); -	if (msdu_id >= 0) { -		spin_lock_bh(&htt->tx_lock); -		htt->pending_tx[msdu_id] = NULL; -		ath10k_htt_tx_free_msdu_id(htt, msdu_id); -		spin_unlock_bh(&htt->tx_lock); -	} +err_unmap_msdu: +	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); +err_free_txdesc: +	dev_kfree_skb_any(txdesc); +err_free_msdu_id: +	spin_lock_bh(&htt->tx_lock); +	htt->pending_tx[msdu_id] = NULL; +	ath10k_htt_tx_free_msdu_id(htt, msdu_id); +	spin_unlock_bh(&htt->tx_lock); +err_tx_dec:  	ath10k_htt_tx_dec_pending(htt); +err:  	return res;  }  int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)  {  	struct device *dev = htt->ar->dev; -	struct htt_cmd *cmd; -	struct htt_data_tx_desc_frag *tx_frags;  	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; -	struct ath10k_skb_cb *skb_cb; -	struct sk_buff *txdesc = NULL; -	struct sk_buff *txfrag = NULL; -	u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id; -	u8 tid; -	int prefetch_len, desc_len, frag_len; -	dma_addr_t frags_paddr; -	int msdu_id = -1; +	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); +	struct ath10k_hif_sg_item sg_items[2]; +	struct htt_data_tx_desc_frag *frags; +	u8 vdev_id = skb_cb->vdev_id; +	u8 tid = skb_cb->htt.tid; +	int prefetch_len;  	int res; -	u8 flags0; -	u16 flags1; +	u8 flags0 = 0; +	u16 msdu_id, flags1 = 0; +	dma_addr_t paddr; +	u32 frags_paddr; +	bool use_frags;  	res = ath10k_htt_tx_inc_pending(htt);  	if (res) -		return res; - -	prefetch_len = min(htt->prefetch_len, msdu->len); -	prefetch_len = roundup(prefetch_len, 4); - -	desc_len = sizeof(cmd->hdr) + 
sizeof(cmd->data_tx) + prefetch_len; -	frag_len = sizeof(*tx_frags) * 2; - -	txdesc = ath10k_htc_alloc_skb(desc_len); -	if (!txdesc) { -		res = -ENOMEM; -		goto err; -	} - -	txfrag = dev_alloc_skb(frag_len); -	if (!txfrag) { -		res = -ENOMEM; -		goto err; -	} - -	if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) { -		ath10k_warn("htt alignment check failed. dropping packet.\n"); -		res = -EIO;  		goto err; -	}  	spin_lock_bh(&htt->tx_lock); -	msdu_id = ath10k_htt_tx_alloc_msdu_id(htt); -	if (msdu_id < 0) { +	res = ath10k_htt_tx_alloc_msdu_id(htt); +	if (res < 0) {  		spin_unlock_bh(&htt->tx_lock); -		res = msdu_id; -		goto err; +		goto err_tx_dec;  	} -	htt->pending_tx[msdu_id] = txdesc; +	msdu_id = res; +	htt->pending_tx[msdu_id] = msdu;  	spin_unlock_bh(&htt->tx_lock); -	res = ath10k_skb_map(dev, msdu); +	prefetch_len = min(htt->prefetch_len, msdu->len); +	prefetch_len = roundup(prefetch_len, 4); + +	/* Since HTT 3.0 there is no separate mgmt tx command. However in case +	 * of mgmt tx using TX_FRM there is no tx fragment list. Instead of a +	 * fragment list the host driver specifies the frame pointer directly. */ +	use_frags = htt->target_version_major < 3 || +		    !ieee80211_is_mgmt(hdr->frame_control); + +	skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC, +					   &paddr); +	if (!skb_cb->htt.txbuf) +		goto err_free_msdu_id; +	skb_cb->htt.txbuf_paddr = paddr; + +	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len, +				       DMA_TO_DEVICE); +	res = dma_mapping_error(dev, skb_cb->paddr);  	if (res) -		goto err; +		goto err_free_txbuf; -	/* tx fragment list must be terminated with zero-entry */ -	skb_put(txfrag, frag_len); -	tx_frags = (struct htt_data_tx_desc_frag *)txfrag->data; -	tx_frags[0].paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr); -	tx_frags[0].len   = __cpu_to_le32(msdu->len); -	tx_frags[1].paddr = __cpu_to_le32(0); -	tx_frags[1].len   = __cpu_to_le32(0); +	if (likely(use_frags)) { +		frags = skb_cb->htt.txbuf->frags; -	res = ath10k_skb_map(dev, txfrag); -	if (res) -		goto err; +		frags[0].paddr = __cpu_to_le32(skb_cb->paddr); +		frags[0].len = __cpu_to_le32(msdu->len); +		frags[1].paddr = 0; +		frags[1].len = 0; -	ath10k_dbg(ATH10K_DBG_HTT, "txfrag 0x%llx msdu 0x%llx\n", -		   (unsigned long long) ATH10K_SKB_CB(txfrag)->paddr, -		   (unsigned long long) ATH10K_SKB_CB(msdu)->paddr); -	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "txfrag: ", -			txfrag->data, frag_len); -	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ", -			msdu->data, msdu->len); +		flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI, +			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); -	skb_put(txdesc, desc_len); -	cmd = (struct htt_cmd *)txdesc->data; -	memset(cmd, 0, desc_len); +		frags_paddr = skb_cb->htt.txbuf_paddr; +	} else { +		flags0 |= SM(ATH10K_HW_TXRX_MGMT, +			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); -	tid = ATH10K_SKB_CB(msdu)->htt.tid; +		frags_paddr = skb_cb->paddr; +	} -	ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid); +	/* Normally all commands go through HTC which manages tx credits for +	 * each endpoint and notifies when tx is completed. +	 * +	 * HTT endpoint is creditless so there's no need to care about HTC +	 * flags. In that case it is trivial to fill the HTC header here. +	 * +	 * MSDU transmission is considered completed upon HTT event. This +	 * implies no relevant resources can be freed until after the event is +	 * received. That's why the HTC tx completion handler itself is +	 * ignored by setting transfer_context to NULL for all sg items. 
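+	 *
+	 * (For orientation: the two sg items filled in below point into one
+	 * DMA-mapped buffer whose layout can be inferred from the offsets
+	 * used here - fragment list first, then the HTC/HTT headers. A
+	 * sketch of the assumed structure, defined in htt.h rather than in
+	 * this hunk:
+	 *
+	 *	struct ath10k_htt_txbuf {
+	 *		struct htt_data_tx_desc_frag frags[2];
+	 *		struct ath10k_htc_hdr htc_hdr;
+	 *		struct htt_cmd_hdr cmd_hdr;
+	 *		struct htt_data_tx_desc cmd_tx;
+	 *	} __packed;
+	 *
+	 * sg_items[0] covers htc_hdr..cmd_tx and sg_items[1] covers the
+	 * msdu payload itself.)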
+	 * +	 * There is simply no point in pushing HTT TX_FRM through HTC tx path +	 * as it's a waste of resources. By bypassing HTC it is possible to +	 * avoid extra memory allocations, compress data structures and thus +	 * improve performance. */ + +	skb_cb->htt.txbuf->htc_hdr.eid = htt->eid; +	skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16( +			sizeof(skb_cb->htt.txbuf->cmd_hdr) + +			sizeof(skb_cb->htt.txbuf->cmd_tx) + +			prefetch_len); +	skb_cb->htt.txbuf->htc_hdr.flags = 0; -	flags0  = 0;  	if (!ieee80211_has_protected(hdr->frame_control))  		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; +  	flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; -	flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI, -		     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); -	flags1  = 0;  	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);  	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);  	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;  	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD; -	frags_paddr = ATH10K_SKB_CB(txfrag)->paddr; - -	cmd->hdr.msg_type        = HTT_H2T_MSG_TYPE_TX_FRM; -	cmd->data_tx.flags0      = flags0; -	cmd->data_tx.flags1      = __cpu_to_le16(flags1); -	cmd->data_tx.len         = __cpu_to_le16(msdu->len); -	cmd->data_tx.id          = __cpu_to_le16(msdu_id); -	cmd->data_tx.frags_paddr = __cpu_to_le32(frags_paddr); -	cmd->data_tx.peerid      = __cpu_to_le32(HTT_INVALID_PEERID); - -	memcpy(cmd->data_tx.prefetch, msdu->data, prefetch_len); - -	/* refcount is decremented by HTC and HTT completions until it reaches -	 * zero and is freed */ -	skb_cb = ATH10K_SKB_CB(txdesc); -	skb_cb->htt.msdu_id = msdu_id; -	skb_cb->htt.refcount = 2; -	skb_cb->htt.txfrag = txfrag; -	skb_cb->htt.msdu = msdu; +	skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM; +	skb_cb->htt.txbuf->cmd_tx.flags0 = flags0; +	skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1); +	skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len); +	skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id); +	skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr); +	skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID); + +	ath10k_dbg(ATH10K_DBG_HTT, +		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu\n", +		   flags0, flags1, msdu->len, msdu_id, frags_paddr, +		   (u32)skb_cb->paddr, vdev_id, tid); +	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ", +			msdu->data, msdu->len); -	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc); +	sg_items[0].transfer_id = 0; +	sg_items[0].transfer_context = NULL; +	sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr; +	sg_items[0].paddr = skb_cb->htt.txbuf_paddr + +			    sizeof(skb_cb->htt.txbuf->frags); +	sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) + +			  sizeof(skb_cb->htt.txbuf->cmd_hdr) + +			  sizeof(skb_cb->htt.txbuf->cmd_tx); + +	sg_items[1].transfer_id = 0; +	sg_items[1].transfer_context = NULL; +	sg_items[1].vaddr = msdu->data; +	sg_items[1].paddr = skb_cb->paddr; +	sg_items[1].len = prefetch_len; + +	res = ath10k_hif_tx_sg(htt->ar, +			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id, +			       sg_items, ARRAY_SIZE(sg_items));  	if (res) -		goto err; +		goto err_unmap_msdu;  	return 0; -err: -	if (txfrag) -		ath10k_skb_unmap(dev, txfrag); -	if (txdesc) -		dev_kfree_skb_any(txdesc); -	if (txfrag) -		dev_kfree_skb_any(txfrag); -	if (msdu_id >= 0) { -		spin_lock_bh(&htt->tx_lock); -		htt->pending_tx[msdu_id] = NULL; -		ath10k_htt_tx_free_msdu_id(htt, msdu_id); -		
spin_unlock_bh(&htt->tx_lock); -	} + +err_unmap_msdu: +	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); +err_free_txbuf: +	dma_pool_free(htt->tx_pool, +		      skb_cb->htt.txbuf, +		      skb_cb->htt.txbuf_paddr); +err_free_msdu_id: +	spin_lock_bh(&htt->tx_lock); +	htt->pending_tx[msdu_id] = NULL; +	ath10k_htt_tx_free_msdu_id(htt, msdu_id); +	spin_unlock_bh(&htt->tx_lock); +err_tx_dec:  	ath10k_htt_tx_dec_pending(htt); -	ath10k_skb_unmap(dev, msdu); +err:  	return res;  } diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 44ed5af0a20..007e855f4ba 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -20,28 +20,38 @@  #include "targaddrs.h" -/* Supported FW version */ -#define SUPPORTED_FW_MAJOR	1 -#define SUPPORTED_FW_MINOR	0 -#define SUPPORTED_FW_RELEASE	0 -#define SUPPORTED_FW_BUILD	629 - -/* QCA988X 1.0 definitions */ -#define QCA988X_HW_1_0_VERSION		0x4000002c -#define QCA988X_HW_1_0_FW_DIR		"ath10k/QCA988X/hw1.0" -#define QCA988X_HW_1_0_FW_FILE		"firmware.bin" -#define QCA988X_HW_1_0_OTP_FILE		"otp.bin" -#define QCA988X_HW_1_0_BOARD_DATA_FILE	"board.bin" -#define QCA988X_HW_1_0_PATCH_LOAD_ADDR	0x1234 +/* QCA988X 1.0 definitions (unsupported) */ +#define QCA988X_HW_1_0_CHIP_ID_REV	0x0  /* QCA988X 2.0 definitions */  #define QCA988X_HW_2_0_VERSION		0x4100016c +#define QCA988X_HW_2_0_CHIP_ID_REV	0x2  #define QCA988X_HW_2_0_FW_DIR		"ath10k/QCA988X/hw2.0"  #define QCA988X_HW_2_0_FW_FILE		"firmware.bin" +#define QCA988X_HW_2_0_FW_2_FILE	"firmware-2.bin"  #define QCA988X_HW_2_0_OTP_FILE		"otp.bin"  #define QCA988X_HW_2_0_BOARD_DATA_FILE	"board.bin"  #define QCA988X_HW_2_0_PATCH_LOAD_ADDR	0x1234 +#define ATH10K_FW_API2_FILE		"firmware-2.bin" + +/* includes also the null byte */ +#define ATH10K_FIRMWARE_MAGIC               "QCA-ATH10K" + +struct ath10k_fw_ie { +	__le32 id; +	__le32 len; +	u8 data[0]; +}; + +enum ath10k_fw_ie_type { +	ATH10K_FW_IE_FW_VERSION = 0, +	ATH10K_FW_IE_TIMESTAMP = 1, +	ATH10K_FW_IE_FEATURES = 2, +	ATH10K_FW_IE_FW_IMAGE = 3, +	ATH10K_FW_IE_OTP_IMAGE = 4, +}; +  /* Known pecularities:   *  - current FW doesn't support raw rx mode (last tested v599)   *  - current FW dumps upon raw tx mode (last tested v599) @@ -53,6 +63,9 @@ enum ath10k_hw_txrx_mode {  	ATH10K_HW_TXRX_RAW = 0,  	ATH10K_HW_TXRX_NATIVE_WIFI = 1,  	ATH10K_HW_TXRX_ETHERNET = 2, + +	/* Valid for HTT >= 3.0. Used for management frames in TX_FRM. */ +	ATH10K_HW_TXRX_MGMT = 3,  };  enum ath10k_mcast2ucast_mode { @@ -60,6 +73,7 @@ enum ath10k_mcast2ucast_mode {  	ATH10K_MCAST2UCAST_ENABLED = 1,  }; +/* Target specific defines for MAIN firmware */  #define TARGET_NUM_VDEVS			8  #define TARGET_NUM_PEER_AST			2  #define TARGET_NUM_WDS_ENTRIES			32 @@ -75,7 +89,11 @@ enum ath10k_mcast2ucast_mode {  #define TARGET_RX_CHAIN_MASK			(BIT(0) | BIT(1) | BIT(2))  #define TARGET_RX_TIMEOUT_LO_PRI		100  #define TARGET_RX_TIMEOUT_HI_PRI		40 -#define TARGET_RX_DECAP_MODE			ATH10K_HW_TXRX_ETHERNET + +/* Native Wifi decap mode is used to align IP frames to 4-byte boundaries and + * avoid a very expensive re-alignment in mac80211. 
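+ *
+ * (The alignment arithmetic: a QoS data frame carries a 26 byte 802.11
+ * header - 24 bytes plus the 2 byte QoS control field - so in raw mode the
+ * IP header would start 4-byte misaligned. Native Wifi rx decap strips the
+ * QoS control, leaving a 24 byte header and a 4-byte aligned IP header.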
*/ +#define TARGET_RX_DECAP_MODE			ATH10K_HW_TXRX_NATIVE_WIFI +  #define TARGET_SCAN_MAX_PENDING_REQS		4  #define TARGET_BMISS_OFFLOAD_MAX_VDEV		3  #define TARGET_ROAM_OFFLOAD_MAX_VDEV		3 @@ -90,6 +108,37 @@ enum ath10k_mcast2ucast_mode {  #define TARGET_NUM_MSDU_DESC			(1024 + 400)  #define TARGET_MAX_FRAG_ENTRIES			0 +/* Target specific defines for 10.X firmware */ +#define TARGET_10X_NUM_VDEVS			16 +#define TARGET_10X_NUM_PEER_AST			2 +#define TARGET_10X_NUM_WDS_ENTRIES		32 +#define TARGET_10X_DMA_BURST_SIZE		0 +#define TARGET_10X_MAC_AGGR_DELIM		0 +#define TARGET_10X_AST_SKID_LIMIT		16 +#define TARGET_10X_NUM_PEERS			(128 + (TARGET_10X_NUM_VDEVS)) +#define TARGET_10X_NUM_PEERS_MAX		128 +#define TARGET_10X_NUM_OFFLOAD_PEERS		0 +#define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS	0 +#define TARGET_10X_NUM_PEER_KEYS		2 +#define TARGET_10X_NUM_TIDS			256 +#define TARGET_10X_TX_CHAIN_MASK		(BIT(0) | BIT(1) | BIT(2)) +#define TARGET_10X_RX_CHAIN_MASK		(BIT(0) | BIT(1) | BIT(2)) +#define TARGET_10X_RX_TIMEOUT_LO_PRI		100 +#define TARGET_10X_RX_TIMEOUT_HI_PRI		40 +#define TARGET_10X_RX_DECAP_MODE		ATH10K_HW_TXRX_NATIVE_WIFI +#define TARGET_10X_SCAN_MAX_PENDING_REQS	4 +#define TARGET_10X_BMISS_OFFLOAD_MAX_VDEV	2 +#define TARGET_10X_ROAM_OFFLOAD_MAX_VDEV	2 +#define TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES	8 +#define TARGET_10X_GTK_OFFLOAD_MAX_VDEV		3 +#define TARGET_10X_NUM_MCAST_GROUPS		0 +#define TARGET_10X_NUM_MCAST_TABLE_ELEMS	0 +#define TARGET_10X_MCAST2UCAST_MODE		ATH10K_MCAST2UCAST_DISABLED +#define TARGET_10X_TX_DBG_LOG_SIZE		1024 +#define TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1 +#define TARGET_10X_VOW_CONFIG			0 +#define TARGET_10X_NUM_MSDU_DESC		(1024 + 400) +#define TARGET_10X_MAX_FRAG_ENTRIES		0  /* Number of Copy Engines supported */  #define CE_COUNT 8 @@ -157,8 +206,11 @@ enum ath10k_mcast2ucast_mode {  #define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS	0x0006c000  #define PCIE_LOCAL_BASE_ADDRESS			0x00080000 +#define SOC_RESET_CONTROL_ADDRESS		0x00000000  #define SOC_RESET_CONTROL_OFFSET		0x00000000  #define SOC_RESET_CONTROL_SI0_RST_MASK		0x00000001 +#define SOC_RESET_CONTROL_CE_RST_MASK		0x00040000 +#define SOC_RESET_CONTROL_CPU_WARM_RST_MASK	0x00000040  #define SOC_CPU_CLOCK_OFFSET			0x00000020  #define SOC_CPU_CLOCK_STANDARD_LSB		0  #define SOC_CPU_CLOCK_STANDARD_MASK		0x00000003 @@ -168,6 +220,12 @@ enum ath10k_mcast2ucast_mode {  #define SOC_LPO_CAL_OFFSET			0x000000e0  #define SOC_LPO_CAL_ENABLE_LSB			20  #define SOC_LPO_CAL_ENABLE_MASK			0x00100000 +#define SOC_LF_TIMER_CONTROL0_ADDRESS		0x00000050 +#define SOC_LF_TIMER_CONTROL0_ENABLE_MASK	0x00000004 + +#define SOC_CHIP_ID_ADDRESS			0x000000ec +#define SOC_CHIP_ID_REV_LSB			8 +#define SOC_CHIP_ID_REV_MASK			0x00000f00  #define WLAN_RESET_CONTROL_COLD_RST_MASK	0x00000008  #define WLAN_RESET_CONTROL_WARM_RST_MASK	0x00000004 @@ -218,8 +276,10 @@ enum ath10k_mcast2ucast_mode {  #define CORE_CTRL_CPU_INTR_MASK			0x00002000  #define CORE_CTRL_ADDRESS			0x0000  #define PCIE_INTR_ENABLE_ADDRESS		0x0008 +#define PCIE_INTR_CAUSE_ADDRESS			0x000c  #define PCIE_INTR_CLR_ADDRESS			0x0014  #define SCRATCH_3_ADDRESS			0x0030 +#define CPU_INTR_ADDRESS			0x0010  /* Firmware indications to the Host via SCRATCH_3 register. 
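 *
 * (Usage sketch, with the FW_IND_* bit masks assumed from elsewhere in the
 * driver rather than from this hunk: on an interrupt the host reads
 *
 *	u32 ind = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
 *
 * and treats a set FW_IND_EVENT_PENDING bit as a pending firmware event or
 * crash indication.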
*/  #define FW_INDICATOR_ADDRESS	(SOC_CORE_BASE_ADDRESS + SCRATCH_3_ADDRESS) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index cf2ba4d850c..a21080028c5 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -54,7 +54,10 @@ static int ath10k_send_key(struct ath10k_vif *arvif,  	switch (key->cipher) {  	case WLAN_CIPHER_SUITE_CCMP:  		arg.key_cipher = WMI_CIPHER_AES_CCM; -		key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; +		if (arvif->vdev_type == WMI_VDEV_TYPE_AP) +			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; +		else +			key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;  		break;  	case WLAN_CIPHER_SUITE_TKIP:  		arg.key_cipher = WMI_CIPHER_TKIP; @@ -92,7 +95,7 @@ static int ath10k_install_key(struct ath10k_vif *arvif,  	lockdep_assert_held(&ar->conf_mutex); -	INIT_COMPLETION(ar->install_key_done); +	reinit_completion(&ar->install_key_done);  	ret = ath10k_send_key(arvif, key, cmd, macaddr);  	if (ret) @@ -165,7 +168,7 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,  			first_errno = ret;  		if (ret) -			ath10k_warn("could not remove peer wep key %d (%d)\n", +			ath10k_warn("failed to remove peer wep key %d: %d\n",  				    i, ret);  		peer->keys[i] = NULL; @@ -213,7 +216,8 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,  			first_errno = ret;  		if (ret) -			ath10k_warn("could not remove key for %pM\n", addr); +			ath10k_warn("failed to remove key for %pM: %d\n", +				    addr, ret);  	}  	return first_errno; @@ -322,37 +326,95 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)  	lockdep_assert_held(&ar->conf_mutex);  	ret = ath10k_wmi_peer_create(ar, vdev_id, addr); -	if (ret) +	if (ret) { +		ath10k_warn("failed to create wmi peer %pM on vdev %i: %i\n", +			    addr, vdev_id, ret);  		return ret; +	}  	ret = ath10k_wait_for_peer_created(ar, vdev_id, addr); -	if (ret) +	if (ret) { +		ath10k_warn("failed to wait for created wmi peer %pM on vdev %i: %i\n", +			    addr, vdev_id, ret); +		return ret; +	} +	spin_lock_bh(&ar->data_lock); +	ar->num_peers++; +	spin_unlock_bh(&ar->data_lock); + +	return 0; +} + +static int ath10k_mac_set_kickout(struct ath10k_vif *arvif) +{ +	struct ath10k *ar = arvif->ar; +	u32 param; +	int ret; + +	param = ar->wmi.pdev_param->sta_kickout_th; +	ret = ath10k_wmi_pdev_set_param(ar, param, +					ATH10K_KICKOUT_THRESHOLD); +	if (ret) { +		ath10k_warn("failed to set kickout threshold on vdev %i: %d\n", +			    arvif->vdev_id, ret); +		return ret; +	} + +	param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs; +	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, +					ATH10K_KEEPALIVE_MIN_IDLE); +	if (ret) { +		ath10k_warn("failed to set keepalive minimum idle time on vdev %i: %d\n", +			    arvif->vdev_id, ret); +		return ret; +	} + +	param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs; +	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, +					ATH10K_KEEPALIVE_MAX_IDLE); +	if (ret) { +		ath10k_warn("failed to set keepalive maximum idle time on vdev %i: %d\n", +			    arvif->vdev_id, ret); +		return ret; +	} + +	param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs; +	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, +					ATH10K_KEEPALIVE_MAX_UNRESPONSIVE); +	if (ret) { +		ath10k_warn("failed to set keepalive maximum unresponsive time on vdev %i: %d\n", +			    arvif->vdev_id, ret);  		return ret; +	}  	return 0;  }  static int  
ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)  { +	struct ath10k *ar = arvif->ar; +	u32 vdev_param; +  	if (value != 0xFFFFFFFF)  		value = min_t(u32, arvif->ar->hw->wiphy->rts_threshold,  			      ATH10K_RTS_MAX); -	return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, -					 WMI_VDEV_PARAM_RTS_THRESHOLD, -					 value); +	vdev_param = ar->wmi.vdev_param->rts_threshold; +	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);  }  static int ath10k_mac_set_frag(struct ath10k_vif *arvif, u32 value)  { +	struct ath10k *ar = arvif->ar; +	u32 vdev_param; +  	if (value != 0xFFFFFFFF)  		value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold,  				ATH10K_FRAGMT_THRESHOLD_MIN,  				ATH10K_FRAGMT_THRESHOLD_MAX); -	return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, -					 WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD, -					 value); +	vdev_param = ar->wmi.vdev_param->fragmentation_threshold; +	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);  }  static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr) @@ -369,6 +431,10 @@ static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)  	if (ret)  		return ret; +	spin_lock_bh(&ar->data_lock); +	ar->num_peers--; +	spin_unlock_bh(&ar->data_lock); +  	return 0;  } @@ -388,6 +454,7 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)  		list_del(&peer->list);  		kfree(peer); +		ar->num_peers--;  	}  	spin_unlock_bh(&ar->data_lock);  } @@ -403,6 +470,7 @@ static void ath10k_peer_cleanup_all(struct ath10k *ar)  		list_del(&peer->list);  		kfree(peer);  	} +	ar->num_peers = 0;  	spin_unlock_bh(&ar->data_lock);  } @@ -424,175 +492,114 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)  	return 0;  } -static int ath10k_vdev_start(struct ath10k_vif *arvif) +static bool ath10k_monitor_is_enabled(struct ath10k *ar)  { -	struct ath10k *ar = arvif->ar; -	struct ieee80211_conf *conf = &ar->hw->conf; -	struct ieee80211_channel *channel = conf->chandef.chan; -	struct wmi_vdev_start_request_arg arg = {}; -	int ret = 0; -  	lockdep_assert_held(&ar->conf_mutex); -	INIT_COMPLETION(ar->vdev_setup_done); - -	arg.vdev_id = arvif->vdev_id; -	arg.dtim_period = arvif->dtim_period; -	arg.bcn_intval = arvif->beacon_interval; - -	arg.channel.freq = channel->center_freq; - -	arg.channel.band_center_freq1 = conf->chandef.center_freq1; +	ath10k_dbg(ATH10K_DBG_MAC, +		   "mac monitor refs: promisc %d monitor %d cac %d\n", +		   ar->promisc, ar->monitor, +		   test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)); -	arg.channel.mode = chan_to_phymode(&conf->chandef); - -	arg.channel.min_power = channel->max_power * 3; -	arg.channel.max_power = channel->max_power * 4; -	arg.channel.max_reg_power = channel->max_reg_power * 4; -	arg.channel.max_antenna_gain = channel->max_antenna_gain; - -	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { -		arg.ssid = arvif->u.ap.ssid; -		arg.ssid_len = arvif->u.ap.ssid_len; -		arg.hidden_ssid = arvif->u.ap.hidden_ssid; -	} else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { -		arg.ssid = arvif->vif->bss_conf.ssid; -		arg.ssid_len = arvif->vif->bss_conf.ssid_len; -	} - -	ret = ath10k_wmi_vdev_start(ar, &arg); -	if (ret) { -		ath10k_warn("WMI vdev start failed: ret %d\n", ret); -		return ret; -	} - -	ret = ath10k_vdev_setup_sync(ar); -	if (ret) { -		ath10k_warn("vdev setup failed %d\n", ret); -		return ret; -	} - -	return ret; -} - -static int ath10k_vdev_stop(struct ath10k_vif *arvif) -{ -	struct ath10k *ar = arvif->ar; -	int ret; - -	
lockdep_assert_held(&ar->conf_mutex); - -	INIT_COMPLETION(ar->vdev_setup_done); - -	ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id); -	if (ret) { -		ath10k_warn("WMI vdev stop failed: ret %d\n", ret); -		return ret; -	} - -	ret = ath10k_vdev_setup_sync(ar); -	if (ret) { -		ath10k_warn("vdev setup failed %d\n", ret); -		return ret; -	} - -	return ret; +	return ar->promisc || ar->monitor || +	       test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);  } -static int ath10k_monitor_start(struct ath10k *ar, int vdev_id) +static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)  { -	struct ieee80211_channel *channel = ar->hw->conf.chandef.chan; +	struct cfg80211_chan_def *chandef = &ar->chandef; +	struct ieee80211_channel *channel = chandef->chan;  	struct wmi_vdev_start_request_arg arg = {}; -	enum nl80211_channel_type type;  	int ret = 0;  	lockdep_assert_held(&ar->conf_mutex); -	type = cfg80211_get_chandef_type(&ar->hw->conf.chandef); -  	arg.vdev_id = vdev_id;  	arg.channel.freq = channel->center_freq; -	arg.channel.band_center_freq1 = ar->hw->conf.chandef.center_freq1; +	arg.channel.band_center_freq1 = chandef->center_freq1;  	/* TODO setup this dynamically, what in case we  	   don't have any vifs? */ -	arg.channel.mode = chan_to_phymode(&ar->hw->conf.chandef); +	arg.channel.mode = chan_to_phymode(chandef); +	arg.channel.chan_radar = +			!!(channel->flags & IEEE80211_CHAN_RADAR); -	arg.channel.min_power = channel->max_power * 3; -	arg.channel.max_power = channel->max_power * 4; -	arg.channel.max_reg_power = channel->max_reg_power * 4; -	arg.channel.max_antenna_gain = channel->max_antenna_gain; +	arg.channel.min_power = 0; +	arg.channel.max_power = channel->max_power * 2; +	arg.channel.max_reg_power = channel->max_reg_power * 2; +	arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;  	ret = ath10k_wmi_vdev_start(ar, &arg);  	if (ret) { -		ath10k_warn("Monitor vdev start failed: ret %d\n", ret); +		ath10k_warn("failed to request monitor vdev %i start: %d\n", +			    vdev_id, ret);  		return ret;  	}  	ret = ath10k_vdev_setup_sync(ar);  	if (ret) { -		ath10k_warn("Monitor vdev setup failed %d\n", ret); +		ath10k_warn("failed to synchronize setup for monitor vdev %i: %d\n", +			    vdev_id, ret);  		return ret;  	}  	ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);  	if (ret) { -		ath10k_warn("Monitor vdev up failed: %d\n", ret); +		ath10k_warn("failed to put up monitor vdev %i: %d\n", +			    vdev_id, ret);  		goto vdev_stop;  	}  	ar->monitor_vdev_id = vdev_id; -	ar->monitor_enabled = true; +	ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %i started\n", +		   ar->monitor_vdev_id);  	return 0;  vdev_stop:  	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);  	if (ret) -		ath10k_warn("Monitor vdev stop failed: %d\n", ret); +		ath10k_warn("failed to stop monitor vdev %i after start failure: %d\n", +			    ar->monitor_vdev_id, ret);  	return ret;  } -static int ath10k_monitor_stop(struct ath10k *ar) +static int ath10k_monitor_vdev_stop(struct ath10k *ar)  {  	int ret = 0;  	lockdep_assert_held(&ar->conf_mutex); -	/* For some reasons, ath10k_wmi_vdev_down() here couse -	 * often ath10k_wmi_vdev_stop() to fail. Next we could -	 * not run monitor vdev and driver reload -	 * required. Don't see such problems we skip -	 * ath10k_wmi_vdev_down() here. 
-	 */ +	ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id); +	if (ret) +		ath10k_warn("failed to put down monitor vdev %i: %d\n", +			    ar->monitor_vdev_id, ret);  	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);  	if (ret) -		ath10k_warn("Monitor vdev stop failed: %d\n", ret); +		ath10k_warn("failed to request monitor vdev %i stop: %d\n", +			    ar->monitor_vdev_id, ret);  	ret = ath10k_vdev_setup_sync(ar);  	if (ret) -		ath10k_warn("Monitor_down sync failed: %d\n", ret); +		ath10k_warn("failed to synchronise monitor vdev %i: %d\n", +			    ar->monitor_vdev_id, ret); -	ar->monitor_enabled = false; +	ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n", +		   ar->monitor_vdev_id);  	return ret;  } -static int ath10k_monitor_create(struct ath10k *ar) +static int ath10k_monitor_vdev_create(struct ath10k *ar)  {  	int bit, ret = 0;  	lockdep_assert_held(&ar->conf_mutex); -	if (ar->monitor_present) { -		ath10k_warn("Monitor mode already enabled\n"); -		return 0; -	} -  	bit = ffs(ar->free_vdev_map);  	if (bit == 0) { -		ath10k_warn("No free VDEV slots\n"); +		ath10k_warn("failed to find free vdev id for monitor vdev\n");  		return -ENOMEM;  	} @@ -603,14 +610,14 @@ static int ath10k_monitor_create(struct ath10k *ar)  				     WMI_VDEV_TYPE_MONITOR,  				     0, ar->mac_addr);  	if (ret) { -		ath10k_warn("WMI vdev monitor create failed: ret %d\n", ret); +		ath10k_warn("failed to request monitor vdev %i creation: %d\n", +			    ar->monitor_vdev_id, ret);  		goto vdev_fail;  	} -	ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface created, vdev id: %d\n", +	ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d created\n",  		   ar->monitor_vdev_id); -	ar->monitor_present = true;  	return 0;  vdev_fail: @@ -621,26 +628,266 @@ vdev_fail: -static int ath10k_monitor_destroy(struct ath10k *ar) +static int ath10k_monitor_vdev_delete(struct ath10k *ar)  {  	int ret = 0;  	lockdep_assert_held(&ar->conf_mutex); -	if (!ar->monitor_present) -		return 0; -  	ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);  	if (ret) { -		ath10k_warn("WMI vdev monitor delete failed: %d\n", ret); +		ath10k_warn("failed to request wmi monitor vdev %i removal: %d\n", +			    ar->monitor_vdev_id, ret);  		return ret;  	}  	ar->free_vdev_map |= 1 << (ar->monitor_vdev_id); -	ar->monitor_present = false; -	ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface destroyed, vdev id: %d\n", +	ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n", +		   ar->monitor_vdev_id); +	return ret; +} + +static int ath10k_monitor_start(struct ath10k *ar) +{ +	int ret; + +	lockdep_assert_held(&ar->conf_mutex); + +	if (!ath10k_monitor_is_enabled(ar)) { +		ath10k_warn("trying to start monitor with no references\n"); +		return 0; +	} + +	if (ar->monitor_started) { +		ath10k_dbg(ATH10K_DBG_MAC, "mac monitor already started\n"); +		return 0; +	} + +	ret = ath10k_monitor_vdev_create(ar); +	if (ret) { +		ath10k_warn("failed to create monitor vdev: %d\n", ret); +		return ret; +	} + +	ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id); +	if (ret) { +		ath10k_warn("failed to start monitor vdev: %d\n", ret); +		ath10k_monitor_vdev_delete(ar); +		return ret; +	} + +	ar->monitor_started = true; +	ath10k_dbg(ATH10K_DBG_MAC, "mac monitor started\n"); + +	return 0; +} + +static void ath10k_monitor_stop(struct ath10k *ar) +{ +	int ret; + +	lockdep_assert_held(&ar->conf_mutex); + +	if (ath10k_monitor_is_enabled(ar)) { +		ath10k_dbg(ATH10K_DBG_MAC, +			   "mac monitor will be stopped later\n"); +		return; +	} + +	if 
(!ar->monitor_started) { +		ath10k_dbg(ATH10K_DBG_MAC, +			   "mac monitor probably failed to start earlier\n"); +		return; +	} + +	ret = ath10k_monitor_vdev_stop(ar); +	if (ret) +		ath10k_warn("failed to stop monitor vdev: %d\n", ret); + +	ret = ath10k_monitor_vdev_delete(ar); +	if (ret) +		ath10k_warn("failed to delete monitor vdev: %d\n", ret); + +	ar->monitor_started = false; +	ath10k_dbg(ATH10K_DBG_MAC, "mac monitor stopped\n"); +} + +static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif) +{ +	struct ath10k *ar = arvif->ar; +	u32 vdev_param, rts_cts = 0; + +	lockdep_assert_held(&ar->conf_mutex); + +	vdev_param = ar->wmi.vdev_param->enable_rtscts; + +	if (arvif->use_cts_prot || arvif->num_legacy_stations > 0) +		rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET); + +	if (arvif->num_legacy_stations > 0) +		rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES, +			      WMI_RTSCTS_PROFILE); + +	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, +					 rts_cts); +} + +static int ath10k_start_cac(struct ath10k *ar) +{ +	int ret; + +	lockdep_assert_held(&ar->conf_mutex); + +	set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); + +	ret = ath10k_monitor_start(ar); +	if (ret) { +		ath10k_warn("failed to start monitor (cac): %d\n", ret); +		clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); +		return ret; +	} + +	ath10k_dbg(ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",  		   ar->monitor_vdev_id); + +	return 0; +} + +static int ath10k_stop_cac(struct ath10k *ar) +{ +	lockdep_assert_held(&ar->conf_mutex); + +	/* CAC is not running - do nothing */ +	if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) +		return 0; + +	clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); +	ath10k_monitor_stop(ar); + +	ath10k_dbg(ATH10K_DBG_MAC, "mac cac finished\n"); + +	return 0; +} + +static void ath10k_recalc_radar_detection(struct ath10k *ar) +{ +	int ret; + +	lockdep_assert_held(&ar->conf_mutex); + +	ath10k_stop_cac(ar); + +	if (!ar->radar_enabled) +		return; + +	if (ar->num_started_vdevs > 0) +		return; + +	ret = ath10k_start_cac(ar); +	if (ret) { +		/* +		 * Not possible to start CAC on current channel so starting +		 * radiation is not allowed, make this channel DFS_UNAVAILABLE +		 * by indicating that radar was detected. 
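+		 *
+		 * (Background: CAC here simply keeps the monitor vdev running
+		 * on the candidate channel while radar pulses reported via
+		 * WMI phyerr events are fed to ar->dfs_detector; the detector
+		 * and DFS region plumbing appear further below around
+		 * ath10k_regd_update()/ath10k_reg_notifier().)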
+		 */ +		ath10k_warn("failed to start CAC: %d\n", ret); +		ieee80211_radar_detected(ar->hw); +	} +} + +static int ath10k_vdev_start(struct ath10k_vif *arvif) +{ +	struct ath10k *ar = arvif->ar; +	struct cfg80211_chan_def *chandef = &ar->chandef; +	struct wmi_vdev_start_request_arg arg = {}; +	int ret = 0; + +	lockdep_assert_held(&ar->conf_mutex); + +	reinit_completion(&ar->vdev_setup_done); + +	arg.vdev_id = arvif->vdev_id; +	arg.dtim_period = arvif->dtim_period; +	arg.bcn_intval = arvif->beacon_interval; + +	arg.channel.freq = chandef->chan->center_freq; +	arg.channel.band_center_freq1 = chandef->center_freq1; +	arg.channel.mode = chan_to_phymode(chandef); + +	arg.channel.min_power = 0; +	arg.channel.max_power = chandef->chan->max_power * 2; +	arg.channel.max_reg_power = chandef->chan->max_reg_power * 2; +	arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2; + +	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { +		arg.ssid = arvif->u.ap.ssid; +		arg.ssid_len = arvif->u.ap.ssid_len; +		arg.hidden_ssid = arvif->u.ap.hidden_ssid; + +		/* For now allow DFS for AP mode */ +		arg.channel.chan_radar = +			!!(chandef->chan->flags & IEEE80211_CHAN_RADAR); +	} else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { +		arg.ssid = arvif->vif->bss_conf.ssid; +		arg.ssid_len = arvif->vif->bss_conf.ssid_len; +	} + +	ath10k_dbg(ATH10K_DBG_MAC, +		   "mac vdev %d start center_freq %d phymode %s\n", +		   arg.vdev_id, arg.channel.freq, +		   ath10k_wmi_phymode_str(arg.channel.mode)); + +	ret = ath10k_wmi_vdev_start(ar, &arg); +	if (ret) { +		ath10k_warn("failed to start WMI vdev %i: %d\n", +			    arg.vdev_id, ret); +		return ret; +	} + +	ret = ath10k_vdev_setup_sync(ar); +	if (ret) { +		ath10k_warn("failed to synchronise setup for vdev %i: %d\n", +			    arg.vdev_id, ret); +		return ret; +	} + +	ar->num_started_vdevs++; +	ath10k_recalc_radar_detection(ar); + +	return ret; +} + +static int ath10k_vdev_stop(struct ath10k_vif *arvif) +{ +	struct ath10k *ar = arvif->ar; +	int ret; + +	lockdep_assert_held(&ar->conf_mutex); + +	reinit_completion(&ar->vdev_setup_done); + +	ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id); +	if (ret) { +		ath10k_warn("failed to stop WMI vdev %i: %d\n", +			    arvif->vdev_id, ret); +		return ret; +	} + +	ret = ath10k_vdev_setup_sync(ar); +	if (ret) { +		ath10k_warn("failed to syncronise setup for vdev %i: %d\n", +			    arvif->vdev_id, ret); +		return ret; +	} + +	WARN_ON(ar->num_started_vdevs == 0); + +	if (ar->num_started_vdevs != 0) { +		ar->num_started_vdevs--; +		ath10k_recalc_radar_detection(ar); +	} +  	return ret;  } @@ -653,6 +900,22 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,  	if (!info->enable_beacon) {  		ath10k_vdev_stop(arvif); + +		arvif->is_started = false; +		arvif->is_up = false; + +		spin_lock_bh(&arvif->ar->data_lock); +		if (arvif->beacon) { +			dma_unmap_single(arvif->ar->dev, +					 ATH10K_SKB_CB(arvif->beacon)->paddr, +					 arvif->beacon->len, DMA_TO_DEVICE); +			dev_kfree_skb_any(arvif->beacon); + +			arvif->beacon = NULL; +			arvif->beacon_sent = false; +		} +		spin_unlock_bh(&arvif->ar->data_lock); +  		return;  	} @@ -662,19 +925,29 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,  	if (ret)  		return; -	ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, 0, info->bssid); +	arvif->aid = 0; +	memcpy(arvif->bssid, info->bssid, ETH_ALEN); + +	ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, +				 arvif->bssid);  	if (ret) { -		ath10k_warn("Failed to bring up VDEV: %d\n", -			    arvif->vdev_id); +	
	ath10k_warn("failed to bring up vdev %d: %i\n", +			    arvif->vdev_id, ret); +		ath10k_vdev_stop(arvif);  		return;  	} -	ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d up\n", arvif->vdev_id); + +	arvif->is_started = true; +	arvif->is_up = true; + +	ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);  }  static void ath10k_control_ibss(struct ath10k_vif *arvif,  				struct ieee80211_bss_conf *info,  				const u8 self_peer[ETH_ALEN])  { +	u32 vdev_param;  	int ret = 0;  	lockdep_assert_held(&arvif->ar->conf_mutex); @@ -682,84 +955,82 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,  	if (!info->ibss_joined) {  		ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer);  		if (ret) -			ath10k_warn("Failed to delete IBSS self peer:%pM for VDEV:%d ret:%d\n", +			ath10k_warn("failed to delete IBSS self peer %pM for vdev %d: %d\n",  				    self_peer, arvif->vdev_id, ret); -		if (is_zero_ether_addr(arvif->u.ibss.bssid)) +		if (is_zero_ether_addr(arvif->bssid))  			return;  		ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, -					 arvif->u.ibss.bssid); +					 arvif->bssid);  		if (ret) { -			ath10k_warn("Failed to delete IBSS BSSID peer:%pM for VDEV:%d ret:%d\n", -				    arvif->u.ibss.bssid, arvif->vdev_id, ret); +			ath10k_warn("failed to delete IBSS BSSID peer %pM for vdev %d: %d\n", +				    arvif->bssid, arvif->vdev_id, ret);  			return;  		} -		memset(arvif->u.ibss.bssid, 0, ETH_ALEN); +		memset(arvif->bssid, 0, ETH_ALEN);  		return;  	}  	ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer);  	if (ret) { -		ath10k_warn("Failed to create IBSS self peer:%pM for VDEV:%d ret:%d\n", +		ath10k_warn("failed to create IBSS self peer %pM for vdev %d: %d\n",  			    self_peer, arvif->vdev_id, ret);  		return;  	} -	ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, -					WMI_VDEV_PARAM_ATIM_WINDOW, +	vdev_param = arvif->ar->wmi.vdev_param->atim_window; +	ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,  					ATH10K_DEFAULT_ATIM);  	if (ret) -		ath10k_warn("Failed to set IBSS ATIM for VDEV:%d ret:%d\n", +		ath10k_warn("failed to set IBSS ATIM for vdev %d: %d\n",  			    arvif->vdev_id, ret);  }  /*   * Review this when mac80211 gains per-interface powersave support.   
*/ -static void ath10k_ps_iter(void *data, u8 *mac, struct ieee80211_vif *vif) +static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)  { -	struct ath10k_generic_iter *ar_iter = data; -	struct ieee80211_conf *conf = &ar_iter->ar->hw->conf; -	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); +	struct ath10k *ar = arvif->ar; +	struct ieee80211_conf *conf = &ar->hw->conf;  	enum wmi_sta_powersave_param param;  	enum wmi_sta_ps_mode psmode;  	int ret;  	lockdep_assert_held(&arvif->ar->conf_mutex); -	if (vif->type != NL80211_IFTYPE_STATION) -		return; +	if (arvif->vif->type != NL80211_IFTYPE_STATION) +		return 0;  	if (conf->flags & IEEE80211_CONF_PS) {  		psmode = WMI_STA_PS_MODE_ENABLED;  		param = WMI_STA_PS_PARAM_INACTIVITY_TIME; -		ret = ath10k_wmi_set_sta_ps_param(ar_iter->ar, -						  arvif->vdev_id, -						  param, +		ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,  						  conf->dynamic_ps_timeout);  		if (ret) { -			ath10k_warn("Failed to set inactivity time for VDEV: %d\n", -				    arvif->vdev_id); -			return; +			ath10k_warn("failed to set inactivity time for vdev %d: %i\n", +				    arvif->vdev_id, ret); +			return ret;  		} - -		ar_iter->ret = ret;  	} else {  		psmode = WMI_STA_PS_MODE_DISABLED;  	} -	ar_iter->ret = ath10k_wmi_set_psmode(ar_iter->ar, arvif->vdev_id, -					     psmode); -	if (ar_iter->ret) -		ath10k_warn("Failed to set PS Mode: %d for VDEV: %d\n", -			    psmode, arvif->vdev_id); -	else -		ath10k_dbg(ATH10K_DBG_MAC, "Set PS Mode: %d for VDEV: %d\n", -			   psmode, arvif->vdev_id); +	ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d psmode %s\n", +		   arvif->vdev_id, psmode ? "enable" : "disable"); + +	ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode); +	if (ret) { +		ath10k_warn("failed to set PS Mode %d for vdev %d: %d\n", +			    psmode, arvif->vdev_id, ret); +		return ret; +	} + +	return 0;  }  /**********************/ @@ -880,7 +1151,6 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,  				   struct wmi_peer_assoc_complete_arg *arg)  {  	const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; -	int smps;  	int i, n;  	lockdep_assert_held(&ar->conf_mutex); @@ -926,17 +1196,6 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,  		arg->peer_flags |= WMI_PEER_STBC;  	} -	smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS; -	smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; - -	if (smps == WLAN_HT_CAP_SM_PS_STATIC) { -		arg->peer_flags |= WMI_PEER_SPATIAL_MUX; -		arg->peer_flags |= WMI_PEER_STATIC_MIMOPS; -	} else if (smps == WLAN_HT_CAP_SM_PS_DYNAMIC) { -		arg->peer_flags |= WMI_PEER_SPATIAL_MUX; -		arg->peer_flags |= WMI_PEER_DYN_MIMOPS; -	} -  	if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])  		arg->peer_rate_caps |= WMI_RC_TS_FLAG;  	else if (ht_cap->mcs.rx_mask[1]) @@ -946,35 +1205,44 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,  		if (ht_cap->mcs.rx_mask[i/8] & (1 << i%8))  			arg->peer_ht_rates.rates[n++] = i; -	arg->peer_ht_rates.num_rates = n; -	arg->peer_num_spatial_streams = max((n+7) / 8, 1); +	/* +	 * This is a workaround for HT-enabled STAs which break the spec +	 * and have no HT capabilities RX mask (no HT RX MCS map). +	 * +	 * As per spec, in section 20.3.5 Modulation and coding scheme (MCS), +	 * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs. +	 * +	 * Firmware asserts if such situation occurs. 
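+	 *
+	 * (Worked example: a 1x1 HT STA normally reports rx_mask[0] == 0xff,
+	 * so the loop above collects rates 0..7 and n == 8. A broken STA
+	 * with an all-zero RX MCS map yields n == 0, and the fallback below
+	 * advertises the mandatory MCS 0-7 set on its behalf.)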
+	 */ +	if (n == 0) { +		arg->peer_ht_rates.num_rates = 8; +		for (i = 0; i < arg->peer_ht_rates.num_rates; i++) +			arg->peer_ht_rates.rates[i] = i; +	} else { +		arg->peer_ht_rates.num_rates = n; +		arg->peer_num_spatial_streams = sta->rx_nss; +	} -	ath10k_dbg(ATH10K_DBG_MAC, "mcs cnt %d nss %d\n", +	ath10k_dbg(ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n", +		   arg->addr,  		   arg->peer_ht_rates.num_rates,  		   arg->peer_num_spatial_streams);  } -static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar, -				       struct ath10k_vif *arvif, -				       struct ieee80211_sta *sta, -				       struct ieee80211_bss_conf *bss_conf, -				       struct wmi_peer_assoc_complete_arg *arg) +static int ath10k_peer_assoc_qos_ap(struct ath10k *ar, +				    struct ath10k_vif *arvif, +				    struct ieee80211_sta *sta)  {  	u32 uapsd = 0;  	u32 max_sp = 0; +	int ret = 0;  	lockdep_assert_held(&ar->conf_mutex); -	if (sta->wme) -		arg->peer_flags |= WMI_PEER_QOS; -  	if (sta->wme && sta->uapsd_queues) { -		ath10k_dbg(ATH10K_DBG_MAC, "uapsd_queues: 0x%X, max_sp: %d\n", +		ath10k_dbg(ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",  			   sta->uapsd_queues, sta->max_sp); -		arg->peer_flags |= WMI_PEER_APSD; -		arg->peer_flags |= WMI_RC_UAPSD_FLAG; -  		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)  			uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |  				 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN; @@ -992,35 +1260,40 @@ static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar,  		if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)  			max_sp = sta->max_sp; -		ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, -					   sta->addr, -					   WMI_AP_PS_PEER_PARAM_UAPSD, -					   uapsd); +		ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, +						 sta->addr, +						 WMI_AP_PS_PEER_PARAM_UAPSD, +						 uapsd); +		if (ret) { +			ath10k_warn("failed to set ap ps peer param uapsd for vdev %i: %d\n", +				    arvif->vdev_id, ret); +			return ret; +		} -		ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, -					   sta->addr, -					   WMI_AP_PS_PEER_PARAM_MAX_SP, -					   max_sp); +		ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, +						 sta->addr, +						 WMI_AP_PS_PEER_PARAM_MAX_SP, +						 max_sp); +		if (ret) { +			ath10k_warn("failed to set ap ps peer param max sp for vdev %i: %d\n", +				    arvif->vdev_id, ret); +			return ret; +		}  		/* TODO setup this based on STA listen interval and  		   beacon interval. Currently we don't know  		   sta->listen_interval - mac80211 patch required.  		   
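 		   (If listen_interval were known, the ageout time would
 		   presumably be derived from a few listen intervals instead
 		   of a fixed value.)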
Currently use 10 seconds */ -		ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, -					   sta->addr, -					   WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, -					   10); +		ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr, +					WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, 10); +		if (ret) { +			ath10k_warn("failed to set ap ps peer param ageout time for vdev %i: %d\n", +				    arvif->vdev_id, ret); +			return ret; +		}  	} -} -static void ath10k_peer_assoc_h_qos_sta(struct ath10k *ar, -					struct ath10k_vif *arvif, -					struct ieee80211_sta *sta, -					struct ieee80211_bss_conf *bss_conf, -					struct wmi_peer_assoc_complete_arg *arg) -{ -	if (bss_conf->qos) -		arg->peer_flags |= WMI_PEER_QOS; +	return 0;  }  static void ath10k_peer_assoc_h_vht(struct ath10k *ar, @@ -1028,14 +1301,27 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,  				    struct wmi_peer_assoc_complete_arg *arg)  {  	const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; +	u8 ampdu_factor;  	if (!vht_cap->vht_supported)  		return;  	arg->peer_flags |= WMI_PEER_VHT; -  	arg->peer_vht_caps = vht_cap->cap; + +	ampdu_factor = (vht_cap->cap & +			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >> +		       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; + +	/* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to +	 * zero in VHT IE. Using it would result in degraded throughput. +	 * arg->peer_max_mpdu at this point contains HT max_mpdu so keep +	 * it if VHT max_mpdu is smaller. */ +	arg->peer_max_mpdu = max(arg->peer_max_mpdu, +				 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR + +					ampdu_factor)) - 1); +  	if (sta->bandwidth == IEEE80211_STA_RX_BW_80)  		arg->peer_flags |= WMI_PEER_80MHZ; @@ -1048,7 +1334,8 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,  	arg->peer_vht_rates.tx_mcs_set =  		__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map); -	ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer\n"); +	ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n", +		   sta->addr, arg->peer_max_mpdu, arg->peer_flags);  }  static void ath10k_peer_assoc_h_qos(struct ath10k *ar, @@ -1059,10 +1346,17 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,  {  	switch (arvif->vdev_type) {  	case WMI_VDEV_TYPE_AP: -		ath10k_peer_assoc_h_qos_ap(ar, arvif, sta, bss_conf, arg); +		if (sta->wme) +			arg->peer_flags |= WMI_PEER_QOS; + +		if (sta->wme && sta->uapsd_queues) { +			arg->peer_flags |= WMI_PEER_APSD; +			arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG; +		}  		break;  	case WMI_VDEV_TYPE_STA: -		ath10k_peer_assoc_h_qos_sta(ar, arvif, sta, bss_conf, arg); +		if (bss_conf->qos) +			arg->peer_flags |= WMI_PEER_QOS;  		break;  	default:  		break; @@ -1076,8 +1370,6 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,  {  	enum wmi_phy_mode phymode = MODE_UNKNOWN; -	/* FIXME: add VHT */ -  	switch (ar->hw->conf.chandef.chan->band) {  	case IEEE80211_BAND_2GHZ:  		if (sta->ht_cap.ht_supported) { @@ -1091,7 +1383,17 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,  		break;  	case IEEE80211_BAND_5GHZ: -		if (sta->ht_cap.ht_supported) { +		/* +		 * Check VHT first. 
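+		 * A VHT station also advertises HT capabilities, so testing
+		 * ht_supported first would wrongly classify it as 11n.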
+		 */ +		if (sta->vht_cap.vht_supported) { +			if (sta->bandwidth == IEEE80211_STA_RX_BW_80) +				phymode = MODE_11AC_VHT80; +			else if (sta->bandwidth == IEEE80211_STA_RX_BW_40) +				phymode = MODE_11AC_VHT40; +			else if (sta->bandwidth == IEEE80211_STA_RX_BW_20) +				phymode = MODE_11AC_VHT20; +		} else if (sta->ht_cap.ht_supported) {  			if (sta->bandwidth == IEEE80211_STA_RX_BW_40)  				phymode = MODE_11NA_HT40;  			else @@ -1105,30 +1407,59 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,  		break;  	} +	ath10k_dbg(ATH10K_DBG_MAC, "mac peer %pM phymode %s\n", +		   sta->addr, ath10k_wmi_phymode_str(phymode)); +  	arg->peer_phymode = phymode;  	WARN_ON(phymode == MODE_UNKNOWN);  } -static int ath10k_peer_assoc(struct ath10k *ar, -			     struct ath10k_vif *arvif, -			     struct ieee80211_sta *sta, -			     struct ieee80211_bss_conf *bss_conf) +static int ath10k_peer_assoc_prepare(struct ath10k *ar, +				     struct ath10k_vif *arvif, +				     struct ieee80211_sta *sta, +				     struct ieee80211_bss_conf *bss_conf, +				     struct wmi_peer_assoc_complete_arg *arg)  { -	struct wmi_peer_assoc_complete_arg arg; -  	lockdep_assert_held(&ar->conf_mutex); -	memset(&arg, 0, sizeof(struct wmi_peer_assoc_complete_arg)); +	memset(arg, 0, sizeof(*arg)); -	ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, &arg); -	ath10k_peer_assoc_h_crypto(ar, arvif, &arg); -	ath10k_peer_assoc_h_rates(ar, sta, &arg); -	ath10k_peer_assoc_h_ht(ar, sta, &arg); -	ath10k_peer_assoc_h_vht(ar, sta, &arg); -	ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, &arg); -	ath10k_peer_assoc_h_phymode(ar, arvif, sta, &arg); +	ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, arg); +	ath10k_peer_assoc_h_crypto(ar, arvif, arg); +	ath10k_peer_assoc_h_rates(ar, sta, arg); +	ath10k_peer_assoc_h_ht(ar, sta, arg); +	ath10k_peer_assoc_h_vht(ar, sta, arg); +	ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, arg); +	ath10k_peer_assoc_h_phymode(ar, arvif, sta, arg); -	return ath10k_wmi_peer_assoc(ar, &arg); +	return 0; +} + +static const u32 ath10k_smps_map[] = { +	[WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC, +	[WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC, +	[WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE, +	[WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE, +}; + +static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif, +				  const u8 *addr, +				  const struct ieee80211_sta_ht_cap *ht_cap) +{ +	int smps; + +	if (!ht_cap->ht_supported) +		return 0; + +	smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS; +	smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; + +	if (smps >= ARRAY_SIZE(ath10k_smps_map)) +		return -EINVAL; + +	return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr, +					 WMI_PEER_SMPS_STATE, +					 ath10k_smps_map[smps]);  }  /* can be called only in mac80211 callbacks due to `key_count` usage */ @@ -1138,6 +1469,8 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,  {  	struct ath10k *ar = hw->priv;  	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); +	struct ieee80211_sta_ht_cap ht_cap; +	struct wmi_peer_assoc_complete_arg peer_arg;  	struct ieee80211_sta *ap_sta;  	int ret; @@ -1147,30 +1480,56 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,  	ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);  	if (!ap_sta) { -		ath10k_warn("Failed to find station entry for %pM\n", -			    bss_conf->bssid); +		ath10k_warn("failed to find station entry for bss %pM vdev %i\n", +			    bss_conf->bssid, arvif->vdev_id);  		rcu_read_unlock();  		return;  	} -	ret = 
ath10k_peer_assoc(ar, arvif, ap_sta, bss_conf); +	/* ap_sta must be accessed only within rcu section which must be left +	 * before calling ath10k_setup_peer_smps() which might sleep. */ +	ht_cap = ap_sta->ht_cap; + +	ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta, +					bss_conf, &peer_arg);  	if (ret) { -		ath10k_warn("Peer assoc failed for %pM\n", bss_conf->bssid); +		ath10k_warn("failed to prepare peer assoc for %pM vdev %i: %d\n", +			    bss_conf->bssid, arvif->vdev_id, ret);  		rcu_read_unlock();  		return;  	}  	rcu_read_unlock(); -	ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, bss_conf->aid, -				 bss_conf->bssid); -	if (ret) -		ath10k_warn("VDEV: %d up failed: ret %d\n", +	ret = ath10k_wmi_peer_assoc(ar, &peer_arg); +	if (ret) { +		ath10k_warn("failed to run peer assoc for %pM vdev %i: %d\n", +			    bss_conf->bssid, arvif->vdev_id, ret); +		return; +	} + +	ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap); +	if (ret) { +		ath10k_warn("failed to setup peer SMPS for vdev %i: %d\n",  			    arvif->vdev_id, ret); -	else -		ath10k_dbg(ATH10K_DBG_MAC, -			   "VDEV: %d associated, BSSID: %pM, AID: %d\n", -			   arvif->vdev_id, bss_conf->bssid, bss_conf->aid); +		return; +	} + +	ath10k_dbg(ATH10K_DBG_MAC, +		   "mac vdev %d up (associated) bssid %pM aid %d\n", +		   arvif->vdev_id, bss_conf->bssid, bss_conf->aid); + +	arvif->aid = bss_conf->aid; +	memcpy(arvif->bssid, bss_conf->bssid, ETH_ALEN); + +	ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid); +	if (ret) { +		ath10k_warn("failed to set vdev %d up: %d\n", +			    arvif->vdev_id, ret); +		return; +	} + +	arvif->is_up = true;  }  /* @@ -1191,10 +1550,11 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,  	 * No idea why this happens, even though VDEV-DOWN is supposed  	 * to be analogous to link down, so just stop the VDEV.  	 */ +	ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d stop (disassociated)\n", +		   arvif->vdev_id); + +	/* FIXME: check return value */  	ret = ath10k_vdev_stop(arvif); -	if (!ret) -		ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d stopped\n", -			   arvif->vdev_id);  	/*  	 * If we don't call VDEV-DOWN after VDEV-STOP FW will remain active and @@ -1203,32 +1563,68 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,  	 * interfaces as it expects there is no rx when no interface is  	 * running.  	 */ +	ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d down\n", arvif->vdev_id); + +	/* FIXME: why don't we print error if wmi call fails? 
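+	 *
+	 * (A later cleanup would presumably check it, in the style used
+	 * elsewhere in this file:
+	 *
+	 *	ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+	 *	if (ret)
+	 *		ath10k_warn("failed to bring down vdev %d: %d\n",
+	 *			    arvif->vdev_id, ret);
+	 *
+	 * but this patch leaves the call unchecked.)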
*/  	ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); -	if (ret) -		ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d ath10k_wmi_vdev_down failed (%d)\n", -			   arvif->vdev_id, ret); -	ath10k_wmi_flush_tx(ar); +	arvif->def_wep_key_idx = 0; -	arvif->def_wep_key_index = 0; +	arvif->is_started = false; +	arvif->is_up = false;  }  static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif, -				struct ieee80211_sta *sta) +				struct ieee80211_sta *sta, bool reassoc)  { +	struct wmi_peer_assoc_complete_arg peer_arg;  	int ret = 0;  	lockdep_assert_held(&ar->conf_mutex); -	ret = ath10k_peer_assoc(ar, arvif, sta, NULL); +	ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg);  	if (ret) { -		ath10k_warn("WMI peer assoc failed for %pM\n", sta->addr); +		ath10k_warn("failed to prepare WMI peer assoc for %pM vdev %i: %i\n", +			    sta->addr, arvif->vdev_id, ret);  		return ret;  	} +	peer_arg.peer_reassoc = reassoc; +	ret = ath10k_wmi_peer_assoc(ar, &peer_arg); +	if (ret) { +		ath10k_warn("failed to run peer assoc for STA %pM vdev %i: %d\n", +			    sta->addr, arvif->vdev_id, ret); +		return ret; +	} + +	ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, &sta->ht_cap); +	if (ret) { +		ath10k_warn("failed to setup peer SMPS for vdev %d: %d\n", +			    arvif->vdev_id, ret); +		return ret; +	} + +	if (!sta->wme) { +		arvif->num_legacy_stations++; +		ret  = ath10k_recalc_rtscts_prot(arvif); +		if (ret) { +			ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n", +				    arvif->vdev_id, ret); +			return ret; +		} +	} +  	ret = ath10k_install_peer_wep_keys(arvif, sta->addr);  	if (ret) { -		ath10k_warn("could not install peer wep keys (%d)\n", ret); +		ath10k_warn("failed to install peer wep keys for vdev %i: %d\n", +			    arvif->vdev_id, ret); +		return ret; +	} + +	ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta); +	if (ret) { +		ath10k_warn("failed to set qos params for STA %pM for vdev %i: %d\n", +			    sta->addr, arvif->vdev_id, ret);  		return ret;  	} @@ -1242,9 +1638,20 @@ static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,  	lockdep_assert_held(&ar->conf_mutex); +	if (!sta->wme) { +		arvif->num_legacy_stations--; +		ret = ath10k_recalc_rtscts_prot(arvif); +		if (ret) { +			ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n", +				    arvif->vdev_id, ret); +			return ret; +		} +	} +  	ret = ath10k_clear_peer_keys(arvif, sta->addr);  	if (ret) { -		ath10k_warn("could not clear all peer wep keys (%d)\n", ret); +		ath10k_warn("failed to clear all peer wep keys for vdev %i: %d\n", +			    arvif->vdev_id, ret);  		return ret;  	} @@ -1306,19 +1713,22 @@ static int ath10k_update_channel_list(struct ath10k *ar)  			ch->allow_vht = true;  			ch->allow_ibss = -				!(channel->flags & IEEE80211_CHAN_NO_IBSS); +				!(channel->flags & IEEE80211_CHAN_NO_IR);  			ch->ht40plus =  				!(channel->flags & IEEE80211_CHAN_NO_HT40PLUS); -			passive = channel->flags & IEEE80211_CHAN_PASSIVE_SCAN; +			ch->chan_radar = +				!!(channel->flags & IEEE80211_CHAN_RADAR); + +			passive = channel->flags & IEEE80211_CHAN_NO_IR;  			ch->passive = passive;  			ch->freq = channel->center_freq; -			ch->min_power = channel->max_power * 3; -			ch->max_power = channel->max_power * 4; -			ch->max_reg_power = channel->max_reg_power * 4; -			ch->max_antenna_gain = channel->max_antenna_gain; +			ch->min_power = 0; +			ch->max_power = channel->max_power * 2; +			ch->max_reg_power = channel->max_reg_power * 2; +			ch->max_antenna_gain = channel->max_antenna_gain * 2;  			
ch->reg_class_id = 0; /* FIXME */  			/* FIXME: why use only legacy modes, why not any @@ -1333,8 +1743,8 @@ static int ath10k_update_channel_list(struct ath10k *ar)  				continue;  			ath10k_dbg(ATH10K_DBG_WMI, -				   "%s: [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n", -				   __func__, ch - arg.channels, arg.n_channels, +				   "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n", +				    ch - arg.channels, arg.n_channels,  				   ch->freq, ch->max_power, ch->max_reg_power,  				   ch->max_antenna_gain, ch->mode); @@ -1348,29 +1758,55 @@ static int ath10k_update_channel_list(struct ath10k *ar)  	return ret;  } +static enum wmi_dfs_region +ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region) +{ +	switch (dfs_region) { +	case NL80211_DFS_UNSET: +		return WMI_UNINIT_DFS_DOMAIN; +	case NL80211_DFS_FCC: +		return WMI_FCC_DFS_DOMAIN; +	case NL80211_DFS_ETSI: +		return WMI_ETSI_DFS_DOMAIN; +	case NL80211_DFS_JP: +		return WMI_MKK4_DFS_DOMAIN; +	} +	return WMI_UNINIT_DFS_DOMAIN; +} +  static void ath10k_regd_update(struct ath10k *ar)  {  	struct reg_dmn_pair_mapping *regpair;  	int ret; +	enum wmi_dfs_region wmi_dfs_reg; +	enum nl80211_dfs_regions nl_dfs_reg;  	lockdep_assert_held(&ar->conf_mutex);  	ret = ath10k_update_channel_list(ar);  	if (ret) -		ath10k_warn("could not update channel list (%d)\n", ret); +		ath10k_warn("failed to update channel list: %d\n", ret);  	regpair = ar->ath_common.regulatory.regpair; +	if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { +		nl_dfs_reg = ar->dfs_detector->region; +		wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg); +	} else { +		wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN; +	} +  	/* Target allows setting up per-band regdomain but ath_common provides  	 * a combined one only */  	ret = ath10k_wmi_pdev_set_regdomain(ar, -					    regpair->regDmnEnum, -					    regpair->regDmnEnum, /* 2ghz */ -					    regpair->regDmnEnum, /* 5ghz */ +					    regpair->reg_domain, +					    regpair->reg_domain, /* 2ghz */ +					    regpair->reg_domain, /* 5ghz */  					    regpair->reg_2ghz_ctl, -					    regpair->reg_5ghz_ctl); +					    regpair->reg_5ghz_ctl, +					    wmi_dfs_reg);  	if (ret) -		ath10k_warn("could not set pdev regdomain (%d)\n", ret); +		ath10k_warn("failed to set pdev regdomain: %d\n", ret);  }  static void ath10k_reg_notifier(struct wiphy *wiphy, @@ -1378,9 +1814,20 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,  {  	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);  	struct ath10k *ar = hw->priv; +	bool result;  	ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory); +	if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { +		ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs region 0x%x\n", +			   request->dfs_region); +		result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector, +							  request->dfs_region); +		if (!result) +			ath10k_warn("DFS region 0x%X not supported, will trigger radar for every pulse\n", +				    request->dfs_region); +	} +  	mutex_lock(&ar->conf_mutex);  	if (ar->state == ATH10K_STATE_ON)  		ath10k_regd_update(ar); @@ -1391,6 +1838,33 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,  /* TX handlers */  /***************/ +static u8 ath10k_tx_h_get_tid(struct ieee80211_hdr *hdr) +{ +	if (ieee80211_is_mgmt(hdr->frame_control)) +		return HTT_DATA_TX_EXT_TID_MGMT; + +	if (!ieee80211_is_data_qos(hdr->frame_control)) +		return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; + +	if (!is_unicast_ether_addr(ieee80211_get_DA(hdr))) +		
return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; + +	return ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK; +} + +static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar, +				  struct ieee80211_tx_info *info) +{ +	if (info->control.vif) +		return ath10k_vif_to_arvif(info->control.vif)->vdev_id; + +	if (ar->monitor_started) +		return ar->monitor_vdev_id; + +	ath10k_warn("failed to resolve vdev id\n"); +	return 0; +} +  /*   * Frames sent to the FW have to be in "Native Wifi" format.   * Strip the QoS field from the 802.11 header. @@ -1411,6 +1885,40 @@ static void ath10k_tx_h_qos_workaround(struct ieee80211_hw *hw,  	skb_pull(skb, IEEE80211_QOS_CTL_LEN);  } +static void ath10k_tx_wep_key_work(struct work_struct *work) +{ +	struct ath10k_vif *arvif = container_of(work, struct ath10k_vif, +						wep_key_work); +	int ret, keyidx = arvif->def_wep_key_newidx; + +	mutex_lock(&arvif->ar->conf_mutex); + +	if (arvif->ar->state != ATH10K_STATE_ON) +		goto unlock; + +	if (arvif->def_wep_key_idx == keyidx) +		goto unlock; + +	ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n", +		   arvif->vdev_id, keyidx); + +	ret = ath10k_wmi_vdev_set_param(arvif->ar, +					arvif->vdev_id, +					arvif->ar->wmi.vdev_param->def_keyid, +					keyidx); +	if (ret) { +		ath10k_warn("failed to update wep key index for vdev %d: %d\n", +			    arvif->vdev_id, +			    ret); +		goto unlock; +	} + +	arvif->def_wep_key_idx = keyidx; + +unlock: +	mutex_unlock(&arvif->ar->conf_mutex); +} +  static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)  {  	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); @@ -1419,11 +1927,6 @@ static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)  	struct ath10k *ar = arvif->ar;  	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;  	struct ieee80211_key_conf *key = info->control.hw_key; -	int ret; - -	/* TODO AP mode should be implemented */ -	if (vif->type != NL80211_IFTYPE_STATION) -		return;  	if (!ieee80211_has_protected(hdr->frame_control))  		return; @@ -1435,20 +1938,14 @@ static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)  	    key->cipher != WLAN_CIPHER_SUITE_WEP104)  		return; -	if (key->keyidx == arvif->def_wep_key_index) +	if (key->keyidx == arvif->def_wep_key_idx)  		return; -	ath10k_dbg(ATH10K_DBG_MAC, "new wep keyidx will be %d\n", key->keyidx); - -	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, -					WMI_VDEV_PARAM_DEF_KEYID, -					key->keyidx); -	if (ret) { -		ath10k_warn("could not update wep keyidx (%d)\n", ret); -		return; -	} - -	arvif->def_wep_key_index = key->keyidx; +	/* FIXME: Most likely a few frames will be TXed with an old key. Simply +	 * queueing frames until key index is updated is not an option because +	 * sk_buff may need more processing to be done, e.g. 
offchannel */ +	arvif->def_wep_key_newidx = key->keyidx; +	ieee80211_queue_work(ar->hw, &arvif->wep_key_work);  }  static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb) @@ -1478,21 +1975,44 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)  {  	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; -	int ret; +	int ret = 0; -	if (ieee80211_is_mgmt(hdr->frame_control)) -		ret = ath10k_htt_mgmt_tx(&ar->htt, skb); -	else if (ieee80211_is_nullfunc(hdr->frame_control)) +	if (ar->htt.target_version_major >= 3) { +		/* Since HTT 3.0 there is no separate mgmt tx command */ +		ret = ath10k_htt_tx(&ar->htt, skb); +		goto exit; +	} + +	if (ieee80211_is_mgmt(hdr->frame_control)) { +		if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, +			     ar->fw_features)) { +			if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >= +			    ATH10K_MAX_NUM_MGMT_PENDING) { +				ath10k_warn("reached WMI management transmit queue limit\n"); +				ret = -EBUSY; +				goto exit; +			} + +			skb_queue_tail(&ar->wmi_mgmt_tx_queue, skb); +			ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work); +		} else { +			ret = ath10k_htt_mgmt_tx(&ar->htt, skb); +		} +	} else if (!test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, +			     ar->fw_features) && +		   ieee80211_is_nullfunc(hdr->frame_control)) {  		/* FW does not report tx status properly for NullFunc frames  		 * unless they are sent through mgmt tx path. mac80211 sends -		 * those frames when it detects link/beacon loss and depends on -		 * the tx status to be correct. */ +		 * those frames when it detects link/beacon loss and depends +		 * on the tx status to be correct. */  		ret = ath10k_htt_mgmt_tx(&ar->htt, skb); -	else +	} else {  		ret = ath10k_htt_tx(&ar->htt, skb); +	} +exit:  	if (ret) { -		ath10k_warn("tx failed (%d). dropping packet.\n", ret); +		ath10k_warn("failed to transmit packet, dropping: %d\n", ret);  		ieee80211_free_txskb(ar->hw, skb);  	}  } @@ -1534,30 +2054,31 @@ void ath10k_offchan_tx_work(struct work_struct *work)  		mutex_lock(&ar->conf_mutex); -		ath10k_dbg(ATH10K_DBG_MAC, "processing offchannel skb %p\n", +		ath10k_dbg(ATH10K_DBG_MAC, "mac offchannel skb %p\n",  			   skb);  		hdr = (struct ieee80211_hdr *)skb->data;  		peer_addr = ieee80211_get_DA(hdr); -		vdev_id = ATH10K_SKB_CB(skb)->htt.vdev_id; +		vdev_id = ATH10K_SKB_CB(skb)->vdev_id;  		spin_lock_bh(&ar->data_lock);  		peer = ath10k_peer_find(ar, vdev_id, peer_addr);  		spin_unlock_bh(&ar->data_lock);  		if (peer) +			/* FIXME: should this use ath10k_warn()? 
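+			 * A pre-existing peer is simply reused here and is
+			 * only deleted below if this worker created it, so
+			 * the quieter debug print is arguably sufficient.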
*/  			ath10k_dbg(ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",  				   peer_addr, vdev_id);  		if (!peer) {  			ret = ath10k_peer_create(ar, vdev_id, peer_addr);  			if (ret) -				ath10k_warn("peer %pM on vdev %d not created (%d)\n", +				ath10k_warn("failed to create peer %pM on vdev %d: %d\n",  					    peer_addr, vdev_id, ret);  		}  		spin_lock_bh(&ar->data_lock); -		INIT_COMPLETION(ar->offchan_tx_completed); +		reinit_completion(&ar->offchan_tx_completed);  		ar->offchan_tx_skb = skb;  		spin_unlock_bh(&ar->data_lock); @@ -1572,7 +2093,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)  		if (!peer) {  			ret = ath10k_peer_delete(ar, vdev_id, peer_addr);  			if (ret) -				ath10k_warn("peer %pM on vdev %d not deleted (%d)\n", +				ath10k_warn("failed to delete peer %pM on vdev %d: %d\n",  					    peer_addr, vdev_id, ret);  		} @@ -1580,6 +2101,39 @@ void ath10k_offchan_tx_work(struct work_struct *work)  	}  } +void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar) +{ +	struct sk_buff *skb; + +	for (;;) { +		skb = skb_dequeue(&ar->wmi_mgmt_tx_queue); +		if (!skb) +			break; + +		ieee80211_free_txskb(ar->hw, skb); +	} +} + +void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) +{ +	struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work); +	struct sk_buff *skb; +	int ret; + +	for (;;) { +		skb = skb_dequeue(&ar->wmi_mgmt_tx_queue); +		if (!skb) +			break; + +		ret = ath10k_wmi_mgmt_tx(ar, skb); +		if (ret) { +			ath10k_warn("failed to transmit management frame via WMI: %d\n", +				    ret); +			ieee80211_free_txskb(ar->hw, skb); +		} +	} +} +  /************/  /* Scanning */  /************/ @@ -1599,7 +2153,7 @@ void ath10k_reset_scan(unsigned long ptr)  		return;  	} -	ath10k_warn("scan timeout. resetting. fw issue?\n"); +	ath10k_warn("scan timed out, firmware problem?\n");  	if (ar->scan.is_roc)  		ieee80211_remain_on_channel_expired(ar->hw); @@ -1635,7 +2189,7 @@ static int ath10k_abort_scan(struct ath10k *ar)  	ret = ath10k_wmi_stop_scan(ar, &arg);  	if (ret) { -		ath10k_warn("could not submit wmi stop scan (%d)\n", ret); +		ath10k_warn("failed to stop wmi scan: %d\n", ret);  		spin_lock_bh(&ar->data_lock);  		ar->scan.in_progress = false;  		ath10k_offchan_tx_purge(ar); @@ -1643,8 +2197,6 @@ static int ath10k_abort_scan(struct ath10k *ar)  		return -EIO;  	} -	ath10k_wmi_flush_tx(ar); -  	ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ);  	if (ret == 0)  		ath10k_warn("timed out while waiting for scan to stop\n"); @@ -1657,7 +2209,7 @@ static int ath10k_abort_scan(struct ath10k *ar)  	spin_lock_bh(&ar->data_lock);  	if (ar->scan.in_progress) { -		ath10k_warn("could not stop scan. 
its still in progress\n"); +		ath10k_warn("failed to stop scan, it's still in progress\n");  		ar->scan.in_progress = false;  		ath10k_offchan_tx_purge(ar);  		ret = -ETIMEDOUT; @@ -1678,10 +2230,6 @@ static int ath10k_start_scan(struct ath10k *ar,  	if (ret)  		return ret; -	/* make sure we submit the command so the completion -	* timeout makes sense */ -	ath10k_wmi_flush_tx(ar); -  	ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ);  	if (ret == 0) {  		ath10k_abort_scan(ar); @@ -1709,16 +2257,7 @@ static void ath10k_tx(struct ieee80211_hw *hw,  	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);  	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;  	struct ath10k *ar = hw->priv; -	struct ath10k_vif *arvif = NULL; -	u32 vdev_id = 0; -	u8 tid; - -	if (info->control.vif) { -		arvif = ath10k_vif_to_arvif(info->control.vif); -		vdev_id = arvif->vdev_id; -	} else if (ar->monitor_enabled) { -		vdev_id = ar->monitor_vdev_id; -	} +	u8 tid, vdev_id;  	/* We should disable CCK RATE due to P2P */  	if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE) @@ -1726,12 +2265,8 @@ static void ath10k_tx(struct ieee80211_hw *hw,  	/* we must calculate tid before we apply qos workaround  	 * as we'd lose the qos control field */ -	tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; -	if (ieee80211_is_data_qos(hdr->frame_control) && -	    is_unicast_ether_addr(ieee80211_get_DA(hdr))) { -		u8 *qc = ieee80211_get_qos_ctl(hdr); -		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; -	} +	tid = ath10k_tx_h_get_tid(hdr); +	vdev_id = ath10k_tx_h_get_vdev_id(ar, info);  	/* it makes no sense to process injected frames like that */  	if (info->control.vif && @@ -1742,14 +2277,14 @@ static void ath10k_tx(struct ieee80211_hw *hw,  		ath10k_tx_h_seq_no(skb);  	} -	memset(ATH10K_SKB_CB(skb), 0, sizeof(*ATH10K_SKB_CB(skb))); -	ATH10K_SKB_CB(skb)->htt.vdev_id = vdev_id; +	ATH10K_SKB_CB(skb)->vdev_id = vdev_id; +	ATH10K_SKB_CB(skb)->htt.is_offchan = false;  	ATH10K_SKB_CB(skb)->htt.tid = tid;  	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {  		spin_lock_bh(&ar->data_lock);  		ATH10K_SKB_CB(skb)->htt.is_offchan = true; -		ATH10K_SKB_CB(skb)->htt.vdev_id = ar->scan.vdev_id; +		ATH10K_SKB_CB(skb)->vdev_id = ar->scan.vdev_id;  		spin_unlock_bh(&ar->data_lock);  		ath10k_dbg(ATH10K_DBG_MAC, "queued offchannel skb %p\n", skb); @@ -1762,117 +2297,330 @@ static void ath10k_tx(struct ieee80211_hw *hw,  	ath10k_tx_htt(ar, skb);  } -/* - * Initialize various parameters with default vaules. - */ +/* Must not be called with conf_mutex held as workers can use that also. 
*/ +static void ath10k_drain_tx(struct ath10k *ar) +{ +	/* make sure rcu-protected mac80211 tx path itself is drained */ +	synchronize_net(); + +	ath10k_offchan_tx_purge(ar); +	ath10k_mgmt_over_wmi_tx_purge(ar); + +	cancel_work_sync(&ar->offchan_tx_work); +	cancel_work_sync(&ar->wmi_mgmt_tx_work); +} +  void ath10k_halt(struct ath10k *ar)  { +	struct ath10k_vif *arvif; +  	lockdep_assert_held(&ar->conf_mutex); +	if (ath10k_monitor_is_enabled(ar)) { +		clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); +		ar->promisc = false; +		ar->monitor = false; +		ath10k_monitor_stop(ar); +	} +  	del_timer_sync(&ar->scan.timeout); -	ath10k_offchan_tx_purge(ar); +	ath10k_reset_scan((unsigned long)ar);  	ath10k_peer_cleanup_all(ar);  	ath10k_core_stop(ar);  	ath10k_hif_power_down(ar);  	spin_lock_bh(&ar->data_lock); -	if (ar->scan.in_progress) { -		del_timer(&ar->scan.timeout); -		ar->scan.in_progress = false; -		ieee80211_scan_completed(ar->hw, true); +	list_for_each_entry(arvif, &ar->arvifs, list) { +		if (!arvif->beacon) +			continue; + +		dma_unmap_single(arvif->ar->dev, +				 ATH10K_SKB_CB(arvif->beacon)->paddr, +				 arvif->beacon->len, DMA_TO_DEVICE); +		dev_kfree_skb_any(arvif->beacon); +		arvif->beacon = NULL;  	}  	spin_unlock_bh(&ar->data_lock);  } +static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) +{ +	struct ath10k *ar = hw->priv; + +	mutex_lock(&ar->conf_mutex); + +	if (ar->cfg_tx_chainmask) { +		*tx_ant = ar->cfg_tx_chainmask; +		*rx_ant = ar->cfg_rx_chainmask; +	} else { +		*tx_ant = ar->supp_tx_chainmask; +		*rx_ant = ar->supp_rx_chainmask; +	} + +	mutex_unlock(&ar->conf_mutex); + +	return 0; +} + +static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant) +{ +	int ret; + +	lockdep_assert_held(&ar->conf_mutex); + +	ar->cfg_tx_chainmask = tx_ant; +	ar->cfg_rx_chainmask = rx_ant; + +	if ((ar->state != ATH10K_STATE_ON) && +	    (ar->state != ATH10K_STATE_RESTARTED)) +		return 0; + +	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask, +					tx_ant); +	if (ret) { +		ath10k_warn("failed to set tx-chainmask: %d, req 0x%x\n", +			    ret, tx_ant); +		return ret; +	} + +	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask, +					rx_ant); +	if (ret) { +		ath10k_warn("failed to set rx-chainmask: %d, req 0x%x\n", +			    ret, rx_ant); +		return ret; +	} + +	return 0; +} + +static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) +{ +	struct ath10k *ar = hw->priv; +	int ret; + +	mutex_lock(&ar->conf_mutex); +	ret = __ath10k_set_antenna(ar, tx_ant, rx_ant); +	mutex_unlock(&ar->conf_mutex); +	return ret; +} +  static int ath10k_start(struct ieee80211_hw *hw)  {  	struct ath10k *ar = hw->priv;  	int ret = 0; +	/* +	 * This makes sense only when restarting hw. It is harmless to call +	 * unconditionally. This is necessary to make sure no HTT/WMI tx +	 * commands will be submitted while restarting. 
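+	 * ath10k_drain_tx() waits for the RCU-protected mac80211 tx path via
+	 * synchronize_net() and then purges and cancels the offchannel and
+	 * WMI mgmt tx workers, so no stale frames can race with the state
+	 * transition below.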
+	 */ +	ath10k_drain_tx(ar); +  	mutex_lock(&ar->conf_mutex); -	if (ar->state != ATH10K_STATE_OFF && -	    ar->state != ATH10K_STATE_RESTARTING) { +	switch (ar->state) { +	case ATH10K_STATE_OFF: +		ar->state = ATH10K_STATE_ON; +		break; +	case ATH10K_STATE_RESTARTING: +		ath10k_halt(ar); +		ar->state = ATH10K_STATE_RESTARTED; +		break; +	case ATH10K_STATE_ON: +	case ATH10K_STATE_RESTARTED: +	case ATH10K_STATE_WEDGED: +		WARN_ON(1);  		ret = -EINVAL; -		goto exit; +		goto err;  	}  	ret = ath10k_hif_power_up(ar);  	if (ret) { -		ath10k_err("could not init hif (%d)\n", ret); -		ar->state = ATH10K_STATE_OFF; -		goto exit; +		ath10k_err("Could not init hif: %d\n", ret); +		goto err_off;  	}  	ret = ath10k_core_start(ar);  	if (ret) { -		ath10k_err("could not init core (%d)\n", ret); -		ath10k_hif_power_down(ar); -		ar->state = ATH10K_STATE_OFF; -		goto exit; +		ath10k_err("Could not init core: %d\n", ret); +		goto err_power_down;  	} -	if (ar->state == ATH10K_STATE_OFF) -		ar->state = ATH10K_STATE_ON; -	else if (ar->state == ATH10K_STATE_RESTARTING) -		ar->state = ATH10K_STATE_RESTARTED; +	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1); +	if (ret) { +		ath10k_warn("failed to enable PMF QOS: %d\n", ret); +		goto err_core_stop; +	} -	ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1); -	if (ret) -		ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n", -			    ret); +	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1); +	if (ret) { +		ath10k_warn("failed to enable dynamic BW: %d\n", ret); +		goto err_core_stop; +	} -	ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 0); -	if (ret) -		ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n", +	if (ar->cfg_tx_chainmask) +		__ath10k_set_antenna(ar, ar->cfg_tx_chainmask, +				     ar->cfg_rx_chainmask); + +	/* +	 * By default the FW sets the ARP frame AC to voice (6). In that case +	 * the ARP exchange does not work properly for a UAPSD-enabled AP. ARP +	 * requests which arrive with access category 0 are processed by the +	 * network stack and sent back with access category 0, but the FW +	 * changes the access category to 6. Setting the ARP frame access +	 * category to best effort (0) solves this problem. 
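+	 * The override is applied just below through the arp_ac_override
+	 * pdev parameter.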
+	 */ + +	ret = ath10k_wmi_pdev_set_param(ar, +					ar->wmi.pdev_param->arp_ac_override, 0); +	if (ret) { +		ath10k_warn("failed to set arp ac override parameter: %d\n",  			    ret); +		goto err_core_stop; +	} +	ar->num_started_vdevs = 0;  	ath10k_regd_update(ar); -exit:  	mutex_unlock(&ar->conf_mutex);  	return 0; + +err_core_stop: +	ath10k_core_stop(ar); + +err_power_down: +	ath10k_hif_power_down(ar); + +err_off: +	ar->state = ATH10K_STATE_OFF; + +err: +	mutex_unlock(&ar->conf_mutex); +	return ret;  }  static void ath10k_stop(struct ieee80211_hw *hw)  {  	struct ath10k *ar = hw->priv; +	ath10k_drain_tx(ar); +  	mutex_lock(&ar->conf_mutex); -	if (ar->state == ATH10K_STATE_ON || -	    ar->state == ATH10K_STATE_RESTARTED || -	    ar->state == ATH10K_STATE_WEDGED) +	if (ar->state != ATH10K_STATE_OFF) {  		ath10k_halt(ar); - -	ar->state = ATH10K_STATE_OFF; +		ar->state = ATH10K_STATE_OFF; +	}  	mutex_unlock(&ar->conf_mutex); -	cancel_work_sync(&ar->offchan_tx_work);  	cancel_work_sync(&ar->restart_work);  } -static void ath10k_config_ps(struct ath10k *ar) +static int ath10k_config_ps(struct ath10k *ar)  { -	struct ath10k_generic_iter ar_iter; +	struct ath10k_vif *arvif; +	int ret = 0;  	lockdep_assert_held(&ar->conf_mutex); -	/* During HW reconfiguration mac80211 reports all interfaces that were -	 * running until reconfiguration was started. Since FW doesn't have any -	 * vdevs at this point we must not iterate over this interface list. -	 * This setting will be updated upon add_interface(). */ -	if (ar->state == ATH10K_STATE_RESTARTED) -		return; +	list_for_each_entry(arvif, &ar->arvifs, list) { +		ret = ath10k_mac_vif_setup_ps(arvif); +		if (ret) { +			ath10k_warn("failed to setup powersave: %d\n", ret); +			break; +		} +	} -	memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter)); -	ar_iter.ar = ar; +	return ret; +} -	ieee80211_iterate_active_interfaces_atomic( -		ar->hw, IEEE80211_IFACE_ITER_NORMAL, -		ath10k_ps_iter, &ar_iter); +static const char *chandef_get_width(enum nl80211_chan_width width) +{ +	switch (width) { +	case NL80211_CHAN_WIDTH_20_NOHT: +		return "20 (noht)"; +	case NL80211_CHAN_WIDTH_20: +		return "20"; +	case NL80211_CHAN_WIDTH_40: +		return "40"; +	case NL80211_CHAN_WIDTH_80: +		return "80"; +	case NL80211_CHAN_WIDTH_80P80: +		return "80+80"; +	case NL80211_CHAN_WIDTH_160: +		return "160"; +	case NL80211_CHAN_WIDTH_5: +		return "5"; +	case NL80211_CHAN_WIDTH_10: +		return "10"; +	} +	return "?"; +} -	if (ar_iter.ret) -		ath10k_warn("failed to set ps config (%d)\n", ar_iter.ret); +static void ath10k_config_chan(struct ath10k *ar) +{ +	struct ath10k_vif *arvif; +	int ret; + +	lockdep_assert_held(&ar->conf_mutex); + +	ath10k_dbg(ATH10K_DBG_MAC, +		   "mac config channel to %dMHz (cf1 %dMHz cf2 %dMHz width %s)\n", +		   ar->chandef.chan->center_freq, +		   ar->chandef.center_freq1, +		   ar->chandef.center_freq2, +		   chandef_get_width(ar->chandef.width)); + +	/* First stop monitor interface. Some FW versions crash if there's a +	 * lone monitor interface. 
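+	 * The monitor vdev is restarted at the end of this function, once
+	 * the regular vdevs have been brought back up.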
*/ +	if (ar->monitor_started) +		ath10k_monitor_vdev_stop(ar); + +	list_for_each_entry(arvif, &ar->arvifs, list) { +		if (!arvif->is_started) +			continue; + +		if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) +			continue; + +		ret = ath10k_vdev_stop(arvif); +		if (ret) { +			ath10k_warn("failed to stop vdev %d: %d\n", +				    arvif->vdev_id, ret); +			continue; +		} +	} + +	/* all vdevs are now stopped - now attempt to restart them */ + +	list_for_each_entry(arvif, &ar->arvifs, list) { +		if (!arvif->is_started) +			continue; + +		if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) +			continue; + +		ret = ath10k_vdev_start(arvif); +		if (ret) { +			ath10k_warn("failed to start vdev %d: %d\n", +				    arvif->vdev_id, ret); +			continue; +		} + +		if (!arvif->is_up) +			continue; + +		ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, +					 arvif->bssid); +		if (ret) { +			ath10k_warn("failed to bring vdev up %d: %d\n", +				    arvif->vdev_id, ret); +			continue; +		} +	} + +	if (ath10k_monitor_is_enabled(ar)) +		ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);  }  static int ath10k_config(struct ieee80211_hw *hw, u32 changed) @@ -1880,28 +2628,68 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)  	struct ath10k *ar = hw->priv;  	struct ieee80211_conf *conf = &hw->conf;  	int ret = 0; +	u32 param;  	mutex_lock(&ar->conf_mutex);  	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { -		ath10k_dbg(ATH10K_DBG_MAC, "Config channel %d mhz\n", -			   conf->chandef.chan->center_freq); +		ath10k_dbg(ATH10K_DBG_MAC, +			   "mac config channel %dMHz flags 0x%x radar %d\n", +			   conf->chandef.chan->center_freq, +			   conf->chandef.chan->flags, +			   conf->radar_enabled); +  		spin_lock_bh(&ar->data_lock);  		ar->rx_channel = conf->chandef.chan;  		spin_unlock_bh(&ar->data_lock); + +		ar->radar_enabled = conf->radar_enabled; +		ath10k_recalc_radar_detection(ar); + +		if (!cfg80211_chandef_identical(&ar->chandef, &conf->chandef)) { +			ar->chandef = conf->chandef; +			ath10k_config_chan(ar); +		} +	} + +	if (changed & IEEE80211_CONF_CHANGE_POWER) { +		ath10k_dbg(ATH10K_DBG_MAC, "mac config power %d\n", +			   hw->conf.power_level); + +		param = ar->wmi.pdev_param->txpower_limit2g; +		ret = ath10k_wmi_pdev_set_param(ar, param, +						hw->conf.power_level * 2); +		if (ret) +			ath10k_warn("failed to set 2g txpower %d: %d\n", +				    hw->conf.power_level, ret); + +		param = ar->wmi.pdev_param->txpower_limit5g; +		ret = ath10k_wmi_pdev_set_param(ar, param, +						hw->conf.power_level * 2); +		if (ret) +			ath10k_warn("failed to set 5g txpower %d: %d\n", +				    hw->conf.power_level, ret);  	}  	if (changed & IEEE80211_CONF_CHANGE_PS)  		ath10k_config_ps(ar);  	if (changed & IEEE80211_CONF_CHANGE_MONITOR) { -		if (conf->flags & IEEE80211_CONF_MONITOR) -			ret = ath10k_monitor_create(ar); -		else -			ret = ath10k_monitor_destroy(ar); +		if (conf->flags & IEEE80211_CONF_MONITOR && !ar->monitor) { +			ar->monitor = true; +			ret = ath10k_monitor_start(ar); +			if (ret) { +				ath10k_warn("failed to start monitor (config): %d\n", +					    ret); +				ar->monitor = false; +			} +		} else if (!(conf->flags & IEEE80211_CONF_MONITOR) && +			   ar->monitor) { +			ar->monitor = false; +			ath10k_monitor_stop(ar); +		}  	} -	ath10k_wmi_flush_tx(ar);  	mutex_unlock(&ar->conf_mutex);  	return ret;  } @@ -1922,6 +2710,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,  	int ret = 0;  	u32 value;  	int bit; +	u32 vdev_param;  	mutex_lock(&ar->conf_mutex); @@ -1930,21 +2719,17 @@ 
static int ath10k_add_interface(struct ieee80211_hw *hw,  	arvif->ar = ar;  	arvif->vif = vif; -	if ((vif->type == NL80211_IFTYPE_MONITOR) && ar->monitor_present) { -		ath10k_warn("Only one monitor interface allowed\n"); -		ret = -EBUSY; -		goto exit; -	} +	INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work); +	INIT_LIST_HEAD(&arvif->list);  	bit = ffs(ar->free_vdev_map);  	if (bit == 0) {  		ret = -EBUSY; -		goto exit; +		goto err;  	}  	arvif->vdev_id = bit - 1;  	arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE; -	ar->free_vdev_map &= ~(1 << arvif->vdev_id);  	if (ar->p2p)  		arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE; @@ -1973,32 +2758,52 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,  		break;  	} -	ath10k_dbg(ATH10K_DBG_MAC, "Add interface: id %d type %d subtype %d\n", +	ath10k_dbg(ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d\n",  		   arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype);  	ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,  				     arvif->vdev_subtype, vif->addr);  	if (ret) { -		ath10k_warn("WMI vdev create failed: ret %d\n", ret); -		goto exit; +		ath10k_warn("failed to create WMI vdev %i: %d\n", +			    arvif->vdev_id, ret); +		goto err;  	} -	ret = ath10k_wmi_vdev_set_param(ar, 0, WMI_VDEV_PARAM_DEF_KEYID, -					arvif->def_wep_key_index); -	if (ret) -		ath10k_warn("Failed to set default keyid: %d\n", ret); +	ar->free_vdev_map &= ~BIT(arvif->vdev_id); +	list_add(&arvif->list, &ar->arvifs); -	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, -					WMI_VDEV_PARAM_TX_ENCAP_TYPE, +	vdev_param = ar->wmi.vdev_param->def_keyid; +	ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param, +					arvif->def_wep_key_idx); +	if (ret) { +		ath10k_warn("failed to set vdev %i default key id: %d\n", +			    arvif->vdev_id, ret); +		goto err_vdev_delete; +	} + +	vdev_param = ar->wmi.vdev_param->tx_encap_type; +	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,  					ATH10K_HW_TXRX_NATIVE_WIFI); -	if (ret) -		ath10k_warn("Failed to set TX encap: %d\n", ret); +	/* 10.X firmware does not support this VDEV parameter. 
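+	 * It rejects the request with -EOPNOTSUPP, which is filtered out
+	 * just below.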
Do not warn */ +	if (ret && ret != -EOPNOTSUPP) { +		ath10k_warn("failed to set vdev %i TX encapsulation: %d\n", +			    arvif->vdev_id, ret); +		goto err_vdev_delete; +	}  	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {  		ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);  		if (ret) { -			ath10k_warn("Failed to create peer for AP: %d\n", ret); -			goto exit; +			ath10k_warn("failed to create vdev %i peer for AP: %d\n", +				    arvif->vdev_id, ret); +			goto err_vdev_delete; +		} + +		ret = ath10k_mac_set_kickout(arvif); +		if (ret) { +			ath10k_warn("failed to set vdev %i kickout parameters: %d\n", +				    arvif->vdev_id, ret); +			goto err_peer_delete;  		}  	} @@ -2007,39 +2812,62 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,  		value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;  		ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,  						  param, value); -		if (ret) -			ath10k_warn("Failed to set RX wake policy: %d\n", ret); +		if (ret) { +			ath10k_warn("failed to set vdev %i RX wake policy: %d\n", +				    arvif->vdev_id, ret); +			goto err_peer_delete; +		}  		param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;  		value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;  		ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,  						  param, value); -		if (ret) -			ath10k_warn("Failed to set TX wake thresh: %d\n", ret); +		if (ret) { +			ath10k_warn("failed to set vdev %i TX wake thresh: %d\n", +				    arvif->vdev_id, ret); +			goto err_peer_delete; +		}  		param = WMI_STA_PS_PARAM_PSPOLL_COUNT;  		value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;  		ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,  						  param, value); -		if (ret) -			ath10k_warn("Failed to set PSPOLL count: %d\n", ret); +		if (ret) { +			ath10k_warn("failed to set vdev %i PSPOLL count: %d\n", +				    arvif->vdev_id, ret); +			goto err_peer_delete; +		}  	}  	ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold); -	if (ret) -		ath10k_warn("failed to set rts threshold for vdev %d (%d)\n", +	if (ret) { +		ath10k_warn("failed to set rts threshold for vdev %d: %d\n",  			    arvif->vdev_id, ret); +		goto err_peer_delete; +	}  	ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold); -	if (ret) -		ath10k_warn("failed to set frag threshold for vdev %d (%d)\n", +	if (ret) { +		ath10k_warn("failed to set frag threshold for vdev %d: %d\n",  			    arvif->vdev_id, ret); +		goto err_peer_delete; +	} -	if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) -		ar->monitor_present = true; +	mutex_unlock(&ar->conf_mutex); +	return 0; -exit: +err_peer_delete: +	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) +		ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr); + +err_vdev_delete: +	ath10k_wmi_vdev_delete(ar, arvif->vdev_id); +	ar->free_vdev_map &= ~BIT(arvif->vdev_id); +	list_del(&arvif->list); + +err:  	mutex_unlock(&ar->conf_mutex); +  	return ret;  } @@ -2052,24 +2880,37 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,  	mutex_lock(&ar->conf_mutex); -	ath10k_dbg(ATH10K_DBG_MAC, "Remove interface: id %d\n", arvif->vdev_id); +	cancel_work_sync(&arvif->wep_key_work); + +	spin_lock_bh(&ar->data_lock); +	if (arvif->beacon) { +		dma_unmap_single(arvif->ar->dev, +				 ATH10K_SKB_CB(arvif->beacon)->paddr, +				 arvif->beacon->len, DMA_TO_DEVICE); +		dev_kfree_skb_any(arvif->beacon); +		arvif->beacon = NULL; +	} +	spin_unlock_bh(&ar->data_lock);  	ar->free_vdev_map |= 1 << (arvif->vdev_id); +	list_del(&arvif->list);  	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {  		ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, 
vif->addr);  		if (ret) -			ath10k_warn("Failed to remove peer for AP: %d\n", ret); +			ath10k_warn("failed to remove peer for AP vdev %i: %d\n", +				    arvif->vdev_id, ret);  		kfree(arvif->u.ap.noa_data);  	} +	ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n", +		   arvif->vdev_id); +  	ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);  	if (ret) -		ath10k_warn("WMI vdev delete failed: %d\n", ret); - -	if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) -		ar->monitor_present = false; +		ath10k_warn("failed to delete WMI vdev %i: %d\n", +			    arvif->vdev_id, ret);  	ath10k_peer_cleanup(ar, arvif->vdev_id); @@ -2103,20 +2944,17 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,  	*total_flags &= SUPPORTED_FILTERS;  	ar->filter_flags = *total_flags; -	if ((ar->filter_flags & FIF_PROMISC_IN_BSS) && -	    !ar->monitor_enabled) { -		ret = ath10k_monitor_start(ar, ar->monitor_vdev_id); -		if (ret) -			ath10k_warn("Unable to start monitor mode\n"); -		else -			ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode started\n"); -	} else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) && -		   ar->monitor_enabled) { -		ret = ath10k_monitor_stop(ar); -		if (ret) -			ath10k_warn("Unable to stop monitor mode\n"); -		else -			ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode stopped\n"); +	if (ar->filter_flags & FIF_PROMISC_IN_BSS && !ar->promisc) { +		ar->promisc = true; +		ret = ath10k_monitor_start(ar); +		if (ret) { +			ath10k_warn("failed to start monitor (promisc): %d\n", +				    ret); +			ar->promisc = false; +		} +	} else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) && ar->promisc) { +		ar->promisc = false; +		ath10k_monitor_stop(ar);  	}  	mutex_unlock(&ar->conf_mutex); @@ -2130,6 +2968,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,  	struct ath10k *ar = hw->priv;  	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);  	int ret = 0; +	u32 vdev_param, pdev_param;  	mutex_lock(&ar->conf_mutex); @@ -2138,44 +2977,44 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,  	if (changed & BSS_CHANGED_BEACON_INT) {  		arvif->beacon_interval = info->beacon_int; -		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, -						WMI_VDEV_PARAM_BEACON_INTERVAL, +		vdev_param = ar->wmi.vdev_param->beacon_interval; +		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,  						arvif->beacon_interval); +		ath10k_dbg(ATH10K_DBG_MAC, +			   "mac vdev %d beacon_interval %d\n", +			   arvif->vdev_id, arvif->beacon_interval); +  		if (ret) -			ath10k_warn("Failed to set beacon interval for VDEV: %d\n", -				    arvif->vdev_id); -		else -			ath10k_dbg(ATH10K_DBG_MAC, -				   "Beacon interval: %d set for VDEV: %d\n", -				   arvif->beacon_interval, arvif->vdev_id); +			ath10k_warn("failed to set beacon interval for vdev %d: %i\n", +				    arvif->vdev_id, ret);  	}  	if (changed & BSS_CHANGED_BEACON) { -		ret = ath10k_wmi_pdev_set_param(ar, -						WMI_PDEV_PARAM_BEACON_TX_MODE, +		ath10k_dbg(ATH10K_DBG_MAC, +			   "vdev %d set beacon tx mode to staggered\n", +			   arvif->vdev_id); + +		pdev_param = ar->wmi.pdev_param->beacon_tx_mode; +		ret = ath10k_wmi_pdev_set_param(ar, pdev_param,  						WMI_BEACON_STAGGERED_MODE);  		if (ret) -			ath10k_warn("Failed to set beacon mode for VDEV: %d\n", -				    arvif->vdev_id); -		else -			ath10k_dbg(ATH10K_DBG_MAC, -				   "Set staggered beacon mode for VDEV: %d\n", -				   arvif->vdev_id); +			ath10k_warn("failed to set beacon mode for vdev %d: %i\n", +				    arvif->vdev_id, ret);  	}  	if (changed & BSS_CHANGED_BEACON_INFO) {  		
arvif->dtim_period = info->dtim_period; -		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, -						WMI_VDEV_PARAM_DTIM_PERIOD, +		ath10k_dbg(ATH10K_DBG_MAC, +			   "mac vdev %d dtim_period %d\n", +			   arvif->vdev_id, arvif->dtim_period); + +		vdev_param = ar->wmi.vdev_param->dtim_period; +		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,  						arvif->dtim_period);  		if (ret) -			ath10k_warn("Failed to set dtim period for VDEV: %d\n", -				    arvif->vdev_id); -		else -			ath10k_dbg(ATH10K_DBG_MAC, -				   "Set dtim period: %d for VDEV: %d\n", -				   arvif->dtim_period, arvif->vdev_id); +			ath10k_warn("failed to set dtim period for vdev %d: %i\n", +				    arvif->vdev_id, ret);  	}  	if (changed & BSS_CHANGED_SSID && @@ -2186,32 +3025,42 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,  		arvif->u.ap.hidden_ssid = info->hidden_ssid;  	} -	if (changed & BSS_CHANGED_BSSID) { +	/* +	 * Firmware manages AP self-peer internally so make sure to not create +	 * it in the driver. Otherwise AP self-peer deletion may time out later. +	 */ +	if (changed & BSS_CHANGED_BSSID && +	    vif->type != NL80211_IFTYPE_AP) {  		if (!is_zero_ether_addr(info->bssid)) { +			ath10k_dbg(ATH10K_DBG_MAC, +				   "mac vdev %d create peer %pM\n", +				   arvif->vdev_id, info->bssid); +  			ret = ath10k_peer_create(ar, arvif->vdev_id,  						 info->bssid);  			if (ret) -				ath10k_warn("Failed to add peer: %pM for VDEV: %d\n", -					    info->bssid, arvif->vdev_id); -			else -				ath10k_dbg(ATH10K_DBG_MAC, -					   "Added peer: %pM for VDEV: %d\n", -					   info->bssid, arvif->vdev_id); - +				ath10k_warn("failed to add peer %pM for vdev %d when changing bssid: %i\n", +					    info->bssid, arvif->vdev_id, ret);  			if (vif->type == NL80211_IFTYPE_STATION) {  				/*  				 * this is never erased as we use it for crypto key  				 * clearing; this is a FW requirement  				 */ -				memcpy(arvif->u.sta.bssid, info->bssid, -				       ETH_ALEN); +				memcpy(arvif->bssid, info->bssid, ETH_ALEN); + +				ath10k_dbg(ATH10K_DBG_MAC, +					   "mac vdev %d start %pM\n", +					   arvif->vdev_id, info->bssid);  				ret = ath10k_vdev_start(arvif); -				if (!ret) -					ath10k_dbg(ATH10K_DBG_MAC, -						   "VDEV: %d started with BSSID: %pM\n", -						   arvif->vdev_id, info->bssid); +				if (ret) { +					ath10k_warn("failed to start vdev %i: %d\n", +						    arvif->vdev_id, ret); +					goto exit; +				} + +				arvif->is_started = true;  			}  			/* @@ -2220,7 +3069,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,  			 * IBSS in order to remove BSSID peer.  			 
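			 * (The address cached in arvif->bssid below is what
			 * makes that later teardown possible.)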
*/  			if (vif->type == NL80211_IFTYPE_ADHOC) -				memcpy(arvif->u.ibss.bssid, info->bssid, +				memcpy(arvif->bssid, info->bssid,  				       ETH_ALEN);  		}  	} @@ -2229,22 +3078,14 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,  		ath10k_control_beaconing(arvif, info);  	if (changed & BSS_CHANGED_ERP_CTS_PROT) { -		u32 cts_prot; -		if (info->use_cts_prot) -			cts_prot = 1; -		else -			cts_prot = 0; +		arvif->use_cts_prot = info->use_cts_prot; +		ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n", +			   arvif->vdev_id, info->use_cts_prot); -		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, -						WMI_VDEV_PARAM_ENABLE_RTSCTS, -						cts_prot); +		ret = ath10k_recalc_rtscts_prot(arvif);  		if (ret) -			ath10k_warn("Failed to set CTS prot for VDEV: %d\n", -				    arvif->vdev_id); -		else -			ath10k_dbg(ATH10K_DBG_MAC, -				   "Set CTS prot: %d for VDEV: %d\n", -				   cts_prot, arvif->vdev_id); +			ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n", +				    arvif->vdev_id, ret);  	}  	if (changed & BSS_CHANGED_ERP_SLOT) { @@ -2255,16 +3096,15 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,  		else  			slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */ -		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, -						WMI_VDEV_PARAM_SLOT_TIME, +		ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n", +			   arvif->vdev_id, slottime); + +		vdev_param = ar->wmi.vdev_param->slot_time; +		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,  						slottime);  		if (ret) -			ath10k_warn("Failed to set erp slot for VDEV: %d\n", -				    arvif->vdev_id); -		else -			ath10k_dbg(ATH10K_DBG_MAC, -				   "Set slottime: %d for VDEV: %d\n", -				   slottime, arvif->vdev_id); +			ath10k_warn("failed to set erp slot for vdev %d: %i\n", +				    arvif->vdev_id, ret);  	}  	if (changed & BSS_CHANGED_ERP_PREAMBLE) { @@ -2274,16 +3114,16 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,  		else  			preamble = WMI_VDEV_PREAMBLE_LONG; -		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, -						WMI_VDEV_PARAM_PREAMBLE, +		ath10k_dbg(ATH10K_DBG_MAC, +			   "mac vdev %d preamble %d\n", +			   arvif->vdev_id, preamble); + +		vdev_param = ar->wmi.vdev_param->preamble; +		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,  						preamble);  		if (ret) -			ath10k_warn("Failed to set preamble for VDEV: %d\n", -				    arvif->vdev_id); -		else -			ath10k_dbg(ATH10K_DBG_MAC, -				   "Set preamble: %d for VDEV: %d\n", -				   preamble, arvif->vdev_id); +			ath10k_warn("failed to set preamble for vdev %d: %i\n", +				    arvif->vdev_id, ret);  	}  	if (changed & BSS_CHANGED_ASSOC) { @@ -2291,6 +3131,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,  			ath10k_bss_assoc(hw, vif, info);  	} +exit:  	mutex_unlock(&ar->conf_mutex);  } @@ -2313,8 +3154,8 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,  		goto exit;  	} -	INIT_COMPLETION(ar->scan.started); -	INIT_COMPLETION(ar->scan.completed); +	reinit_completion(&ar->scan.started); +	reinit_completion(&ar->scan.completed);  	ar->scan.in_progress = true;  	ar->scan.aborting = false;  	ar->scan.is_roc = false; @@ -2352,7 +3193,7 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,  	ret = ath10k_start_scan(ar, &arg);  	if (ret) { -		ath10k_warn("could not start hw scan (%d)\n", ret); +		ath10k_warn("failed to start hw scan: %d\n", ret);  		spin_lock_bh(&ar->data_lock);  		ar->scan.in_progress = false;  		spin_unlock_bh(&ar->data_lock); @@ -2372,13 +3213,50 @@ static 
void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,  	mutex_lock(&ar->conf_mutex);  	ret = ath10k_abort_scan(ar);  	if (ret) { -		ath10k_warn("couldn't abort scan (%d). forcefully sending scan completion to mac80211\n", -			    ret); +		ath10k_warn("failed to abort scan: %d\n", ret);  		ieee80211_scan_completed(hw, 1 /* aborted */);  	}  	mutex_unlock(&ar->conf_mutex);  } +static void ath10k_set_key_h_def_keyidx(struct ath10k *ar, +					struct ath10k_vif *arvif, +					enum set_key_cmd cmd, +					struct ieee80211_key_conf *key) +{ +	u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid; +	int ret; + +	/* 10.1 firmware branch requires default key index to be set to group +	 * key index after installing it. Otherwise FW/HW Txes corrupted +	 * frames with multi-vif APs. This is not required for main firmware +	 * branch (e.g. 636). +	 * +	 * FIXME: This has been tested only in AP. It remains unknown if this +	 * is required for multi-vif STA interfaces on 10.1 */ + +	if (arvif->vdev_type != WMI_VDEV_TYPE_AP) +		return; + +	if (key->cipher == WLAN_CIPHER_SUITE_WEP40) +		return; + +	if (key->cipher == WLAN_CIPHER_SUITE_WEP104) +		return; + +	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) +		return; + +	if (cmd != SET_KEY) +		return; + +	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, +					key->keyidx); +	if (ret) +		ath10k_warn("failed to set vdev %i group key as default key: %d\n", +			    arvif->vdev_id, ret); +} +  static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,  			  struct ieee80211_vif *vif, struct ieee80211_sta *sta,  			  struct ieee80211_key_conf *key) @@ -2413,7 +3291,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,  	if (!peer) {  		if (cmd == SET_KEY) { -			ath10k_warn("cannot install key for non-existent peer %pM\n", +			ath10k_warn("failed to install key for non-existent peer %pM\n",  				    peer_addr);  			ret = -EOPNOTSUPP;  			goto exit; @@ -2436,10 +3314,13 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,  	ret = ath10k_install_key(arvif, key, cmd, peer_addr);  	if (ret) { -		ath10k_warn("ath10k_install_key failed (%d)\n", ret); +		ath10k_warn("failed to install key for vdev %i peer %pM: %d\n", +			    arvif->vdev_id, peer_addr, ret);  		goto exit;  	} +	ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key); +  	spin_lock_bh(&ar->data_lock);  	peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);  	if (peer && cmd == SET_KEY) @@ -2448,7 +3329,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,  		peer->keys[key->keyidx] = NULL;  	else if (peer == NULL)  		/* impossible unless FW goes crazy */ -		ath10k_warn("peer %pM disappeared!\n", peer_addr); +		ath10k_warn("Peer %pM disappeared!\n", peer_addr);  	spin_unlock_bh(&ar->data_lock);  exit: @@ -2456,6 +3337,79 @@ exit:  	return ret;  } +static void ath10k_sta_rc_update_wk(struct work_struct *wk) +{ +	struct ath10k *ar; +	struct ath10k_vif *arvif; +	struct ath10k_sta *arsta; +	struct ieee80211_sta *sta; +	u32 changed, bw, nss, smps; +	int err; + +	arsta = container_of(wk, struct ath10k_sta, update_wk); +	sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv); +	arvif = arsta->arvif; +	ar = arvif->ar; + +	spin_lock_bh(&ar->data_lock); + +	changed = arsta->changed; +	arsta->changed = 0; + +	bw = arsta->bw; +	nss = arsta->nss; +	smps = arsta->smps; + +	spin_unlock_bh(&ar->data_lock); + +	mutex_lock(&ar->conf_mutex); + +	if (changed & IEEE80211_RC_BW_CHANGED) { +		ath10k_dbg(ATH10K_DBG_MAC, "mac update 
sta %pM peer bw %d\n", +			   sta->addr, bw); + +		err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, +						WMI_PEER_CHAN_WIDTH, bw); +		if (err) +			ath10k_warn("failed to update STA %pM peer bw %d: %d\n", +				    sta->addr, bw, err); +	} + +	if (changed & IEEE80211_RC_NSS_CHANGED) { +		ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM nss %d\n", +			   sta->addr, nss); + +		err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, +						WMI_PEER_NSS, nss); +		if (err) +			ath10k_warn("failed to update STA %pM nss %d: %d\n", +				    sta->addr, nss, err); +	} + +	if (changed & IEEE80211_RC_SMPS_CHANGED) { +		ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM smps %d\n", +			   sta->addr, smps); + +		err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, +						WMI_PEER_SMPS_STATE, smps); +		if (err) +			ath10k_warn("failed to update STA %pM smps %d: %d\n", +				    sta->addr, smps, err); +	} + +	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) { +		ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM supp rates\n", +			   sta->addr); + +		err = ath10k_station_assoc(ar, arvif, sta, true); +		if (err) +			ath10k_warn("failed to reassociate station: %pM\n", +				    sta->addr); +	} + +	mutex_unlock(&ar->conf_mutex); +} +  static int ath10k_sta_state(struct ieee80211_hw *hw,  			    struct ieee80211_vif *vif,  			    struct ieee80211_sta *sta, @@ -2464,8 +3418,22 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,  {  	struct ath10k *ar = hw->priv;  	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); +	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; +	int max_num_peers;  	int ret = 0; +	if (old_state == IEEE80211_STA_NOTEXIST && +	    new_state == IEEE80211_STA_NONE) { +		memset(arsta, 0, sizeof(*arsta)); +		arsta->arvif = arvif; +		INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk); +	} + +	/* cancel must be done outside the mutex to avoid deadlock */ +	if ((old_state == IEEE80211_STA_NONE && +	     new_state == IEEE80211_STA_NOTEXIST)) +		cancel_work_sync(&arsta->update_wk); +  	mutex_lock(&ar->conf_mutex);  	if (old_state == IEEE80211_STA_NOTEXIST && @@ -2474,27 +3442,38 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,  		/*  		 * New station addition.  		 */ +		if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) +			max_num_peers = TARGET_10X_NUM_PEERS_MAX - 1; +		else +			max_num_peers = TARGET_NUM_PEERS; + +		if (ar->num_peers >= max_num_peers) { +			ath10k_warn("number of peers exceeded: peers number %d (max peers %d)\n", +				    ar->num_peers, max_num_peers); +			ret = -ENOBUFS; +			goto exit; +		} + +		ath10k_dbg(ATH10K_DBG_MAC, +			   "mac vdev %d peer create %pM (new sta) num_peers %d\n", +			   arvif->vdev_id, sta->addr, ar->num_peers); +  		ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);  		if (ret) -			ath10k_warn("Failed to add peer: %pM for VDEV: %d\n", -				    sta->addr, arvif->vdev_id); -		else -			ath10k_dbg(ATH10K_DBG_MAC, -				   "Added peer: %pM for VDEV: %d\n", -				   sta->addr, arvif->vdev_id); +			ath10k_warn("failed to add peer %pM for vdev %d when adding a new sta: %i\n", +				    sta->addr, arvif->vdev_id, ret);  	} else if ((old_state == IEEE80211_STA_NONE &&  		    new_state == IEEE80211_STA_NOTEXIST)) {  		/*  		 * Existing station deletion.  		 
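		 * The firmware peer is deleted first; for a station interface
		 * the bss disassoc below then tears down the link state too.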
*/ +		ath10k_dbg(ATH10K_DBG_MAC, +			   "mac vdev %d peer delete %pM (sta gone)\n", +			   arvif->vdev_id, sta->addr);  		ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);  		if (ret) -			ath10k_warn("Failed to delete peer: %pM for VDEV: %d\n", -				    sta->addr, arvif->vdev_id); -		else -			ath10k_dbg(ATH10K_DBG_MAC, -				   "Removed peer: %pM for VDEV: %d\n", -				   sta->addr, arvif->vdev_id); +			ath10k_warn("failed to delete peer %pM for vdev %d: %i\n", +				    sta->addr, arvif->vdev_id, ret);  		if (vif->type == NL80211_IFTYPE_STATION)  			ath10k_bss_disassoc(hw, vif); @@ -2505,14 +3484,13 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,  		/*  		 * New association.  		 */ -		ret = ath10k_station_assoc(ar, arvif, sta); +		ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM associated\n", +			   sta->addr); + +		ret = ath10k_station_assoc(ar, arvif, sta, false);  		if (ret) -			ath10k_warn("Failed to associate station: %pM\n", -				    sta->addr); -		else -			ath10k_dbg(ATH10K_DBG_MAC, -				   "Station %pM moved to assoc state\n", -				   sta->addr); +			ath10k_warn("failed to associate station %pM for vdev %i: %i\n", +				    sta->addr, arvif->vdev_id, ret);  	} else if (old_state == IEEE80211_STA_ASSOC &&  		   new_state == IEEE80211_STA_AUTH &&  		   (vif->type == NL80211_IFTYPE_AP || @@ -2520,16 +3498,15 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,  		/*  		 * Disassociation.  		 */ +		ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM disassociated\n", +			   sta->addr); +  		ret = ath10k_station_disassoc(ar, arvif, sta);  		if (ret) -			ath10k_warn("Failed to disassociate station: %pM\n", -				    sta->addr); -		else -			ath10k_dbg(ATH10K_DBG_MAC, -				   "Station %pM moved to disassociated state\n", -				   sta->addr); +			ath10k_warn("failed to disassociate station: %pM vdev %i: %i\n", +				    sta->addr, arvif->vdev_id, ret);  	} - +exit:  	mutex_unlock(&ar->conf_mutex);  	return ret;  } @@ -2574,7 +3551,7 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,  					  WMI_STA_PS_PARAM_UAPSD,  					  arvif->u.sta.uapsd);  	if (ret) { -		ath10k_warn("could not set uapsd params %d\n", ret); +		ath10k_warn("failed to set uapsd params: %d\n", ret);  		goto exit;  	} @@ -2587,7 +3564,7 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,  					  WMI_STA_PS_PARAM_RX_WAKE_POLICY,  					  value);  	if (ret) -		ath10k_warn("could not set rx wake param %d\n", ret); +		ath10k_warn("failed to set rx wake param: %d\n", ret);  exit:  	return ret; @@ -2637,13 +3614,13 @@ static int ath10k_conf_tx(struct ieee80211_hw *hw,  	/* FIXME: FW accepts wmm params per hw, not per vif */  	ret = ath10k_wmi_pdev_set_wmm_params(ar, &ar->wmm_params);  	if (ret) { -		ath10k_warn("could not set wmm params %d\n", ret); +		ath10k_warn("failed to set wmm params: %d\n", ret);  		goto exit;  	}  	ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);  	if (ret) -		ath10k_warn("could not set sta uapsd %d\n", ret); +		ath10k_warn("failed to set sta uapsd: %d\n", ret);  exit:  	mutex_unlock(&ar->conf_mutex); @@ -2672,9 +3649,9 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,  		goto exit;  	} -	INIT_COMPLETION(ar->scan.started); -	INIT_COMPLETION(ar->scan.completed); -	INIT_COMPLETION(ar->scan.on_channel); +	reinit_completion(&ar->scan.started); +	reinit_completion(&ar->scan.completed); +	reinit_completion(&ar->scan.on_channel);  	ar->scan.in_progress = true;  	ar->scan.aborting = false;  	ar->scan.is_roc = true; @@ -2696,7 +3673,7 @@ static 
int ath10k_remain_on_channel(struct ieee80211_hw *hw,  	ret = ath10k_start_scan(ar, &arg);  	if (ret) { -		ath10k_warn("could not start roc scan (%d)\n", ret); +		ath10k_warn("failed to start roc scan: %d\n", ret);  		spin_lock_bh(&ar->data_lock);  		ar->scan.in_progress = false;  		spin_unlock_bh(&ar->data_lock); @@ -2705,7 +3682,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,  	ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ);  	if (ret == 0) { -		ath10k_warn("could not switch to channel for roc scan\n"); +		ath10k_warn("failed to switch to channel for roc scan\n");  		ath10k_abort_scan(ar);  		ret = -ETIMEDOUT;  		goto exit; @@ -2732,91 +3709,55 @@ static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)   * Both RTS and Fragmentation threshold are interface-specific   * in ath10k, but device-specific in mac80211.   */ -static void ath10k_set_rts_iter(void *data, u8 *mac, struct ieee80211_vif *vif) -{ -	struct ath10k_generic_iter *ar_iter = data; -	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); -	u32 rts = ar_iter->ar->hw->wiphy->rts_threshold; - -	lockdep_assert_held(&arvif->ar->conf_mutex); - -	/* During HW reconfiguration mac80211 reports all interfaces that were -	 * running until reconfiguration was started. Since FW doesn't have any -	 * vdevs at this point we must not iterate over this interface list. -	 * This setting will be updated upon add_interface(). */ -	if (ar_iter->ar->state == ATH10K_STATE_RESTARTED) -		return; - -	ar_iter->ret = ath10k_mac_set_rts(arvif, rts); -	if (ar_iter->ret) -		ath10k_warn("Failed to set RTS threshold for VDEV: %d\n", -			    arvif->vdev_id); -	else -		ath10k_dbg(ATH10K_DBG_MAC, -			   "Set RTS threshold: %d for VDEV: %d\n", -			   rts, arvif->vdev_id); -}  static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)  { -	struct ath10k_generic_iter ar_iter;  	struct ath10k *ar = hw->priv; - -	memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter)); -	ar_iter.ar = ar; +	struct ath10k_vif *arvif; +	int ret = 0;  	mutex_lock(&ar->conf_mutex); -	ieee80211_iterate_active_interfaces_atomic( -		hw, IEEE80211_IFACE_ITER_NORMAL, -		ath10k_set_rts_iter, &ar_iter); -	mutex_unlock(&ar->conf_mutex); - -	return ar_iter.ret; -} +	list_for_each_entry(arvif, &ar->arvifs, list) { +		ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n", +			   arvif->vdev_id, value); -static void ath10k_set_frag_iter(void *data, u8 *mac, struct ieee80211_vif *vif) -{ -	struct ath10k_generic_iter *ar_iter = data; -	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); -	u32 frag = ar_iter->ar->hw->wiphy->frag_threshold; - -	lockdep_assert_held(&arvif->ar->conf_mutex); - -	/* During HW reconfiguration mac80211 reports all interfaces that were -	 * running until reconfiguration was started. Since FW doesn't have any -	 * vdevs at this point we must not iterate over this interface list. -	 * This setting will be updated upon add_interface(). 
*/ -	if (ar_iter->ar->state == ATH10K_STATE_RESTARTED) -		return; +		ret = ath10k_mac_set_rts(arvif, value); +		if (ret) { +			ath10k_warn("failed to set rts threshold for vdev %d: %d\n", +				    arvif->vdev_id, ret); +			break; +		} +	} +	mutex_unlock(&ar->conf_mutex); -	ar_iter->ret = ath10k_mac_set_frag(arvif, frag); -	if (ar_iter->ret) -		ath10k_warn("Failed to set frag threshold for VDEV: %d\n", -			    arvif->vdev_id); -	else -		ath10k_dbg(ATH10K_DBG_MAC, -			   "Set frag threshold: %d for VDEV: %d\n", -			   frag, arvif->vdev_id); +	return ret;  }  static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)  { -	struct ath10k_generic_iter ar_iter;  	struct ath10k *ar = hw->priv; - -	memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter)); -	ar_iter.ar = ar; +	struct ath10k_vif *arvif; +	int ret = 0;  	mutex_lock(&ar->conf_mutex); -	ieee80211_iterate_active_interfaces_atomic( -		hw, IEEE80211_IFACE_ITER_NORMAL, -		ath10k_set_frag_iter, &ar_iter); +	list_for_each_entry(arvif, &ar->arvifs, list) { +		ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d fragmentation threshold %d\n", +			   arvif->vdev_id, value); + +		ret = ath10k_mac_set_frag(arvif, value); +		if (ret) { +			ath10k_warn("failed to set fragmentation threshold for vdev %d: %d\n", +				    arvif->vdev_id, ret); +			break; +		} +	}  	mutex_unlock(&ar->conf_mutex); -	return ar_iter.ret; +	return ret;  } -static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop) +static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, +			 u32 queues, bool drop)  {  	struct ath10k *ar = hw->priv;  	bool skip; @@ -2836,8 +3777,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)  			bool empty;  			spin_lock_bh(&ar->htt.tx_lock); -			empty = bitmap_empty(ar->htt.used_msdu_ids, -					     ar->htt.max_num_pending_tx); +			empty = (ar->htt.num_pending_tx == 0);  			spin_unlock_bh(&ar->htt.tx_lock);  			skip = (ar->state == ATH10K_STATE_WEDGED); @@ -2846,7 +3786,8 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)  		}), ATH10K_FLUSH_TIMEOUT_HZ);  	if (ret <= 0 || skip) -		ath10k_warn("tx not flushed\n"); +		ath10k_warn("failed to flush transmit queue (skip %i ar-state %i): %i\n", +			    skip, ar->state, ret);  skip:  	mutex_unlock(&ar->conf_mutex); @@ -2868,37 +3809,33 @@ static int ath10k_suspend(struct ieee80211_hw *hw,  	struct ath10k *ar = hw->priv;  	int ret; -	ar->is_target_paused = false; +	mutex_lock(&ar->conf_mutex); -	ret = ath10k_wmi_pdev_suspend_target(ar); +	ret = ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND);  	if (ret) { -		ath10k_warn("could not suspend target (%d)\n", ret); -		return 1; -	} - -	ret = wait_event_interruptible_timeout(ar->event_queue, -					       ar->is_target_paused == true, -					       1 * HZ); -	if (ret < 0) { -		ath10k_warn("suspend interrupted (%d)\n", ret); -		goto resume; -	} else if (ret == 0) { -		ath10k_warn("suspend timed out - target pause event never came\n"); -		goto resume; +		if (ret == -ETIMEDOUT) +			goto resume; +		ret = 1; +		goto exit;  	}  	ret = ath10k_hif_suspend(ar);  	if (ret) { -		ath10k_warn("could not suspend hif (%d)\n", ret); +		ath10k_warn("failed to suspend hif: %d\n", ret);  		goto resume;  	} -	return 0; +	ret = 0; +	goto exit;  resume:  	ret = ath10k_wmi_pdev_resume_target(ar);  	if (ret) -		ath10k_warn("could not resume target (%d)\n", ret); -	return 1; +		ath10k_warn("failed to resume target: %d\n", ret); + +	ret = 1; +exit: +	mutex_unlock(&ar->conf_mutex); +	return ret;  }  static int 
ath10k_resume(struct ieee80211_hw *hw) @@ -2906,19 +3843,26 @@ static int ath10k_resume(struct ieee80211_hw *hw)  	struct ath10k *ar = hw->priv;  	int ret; +	mutex_lock(&ar->conf_mutex); +  	ret = ath10k_hif_resume(ar);  	if (ret) { -		ath10k_warn("could not resume hif (%d)\n", ret); -		return 1; +		ath10k_warn("failed to resume hif: %d\n", ret); +		ret = 1; +		goto exit;  	}  	ret = ath10k_wmi_pdev_resume_target(ar);  	if (ret) { -		ath10k_warn("could not resume target (%d)\n", ret); -		return 1; +		ath10k_warn("failed to resume target: %d\n", ret); +		ret = 1; +		goto exit;  	} -	return 0; +	ret = 0; +exit: +	mutex_unlock(&ar->conf_mutex); +	return ret;  }  #endif @@ -2973,6 +3917,419 @@ exit:  	return ret;  } +/* Helper table for legacy fixed_rate/bitrate_mask */ +static const u8 cck_ofdm_rate[] = { +	/* CCK */ +	3, /* 1Mbps */ +	2, /* 2Mbps */ +	1, /* 5.5Mbps */ +	0, /* 11Mbps */ +	/* OFDM */ +	3, /* 6Mbps */ +	7, /* 9Mbps */ +	2, /* 12Mbps */ +	6, /* 18Mbps */ +	1, /* 24Mbps */ +	5, /* 36Mbps */ +	0, /* 48Mbps */ +	4, /* 54Mbps */ +}; + +/* Check if only one bit set */ +static int ath10k_check_single_mask(u32 mask) +{ +	int bit; + +	bit = ffs(mask); +	if (!bit) +		return 0; + +	mask &= ~BIT(bit - 1); +	if (mask) +		return 2; + +	return 1; +} + +static bool +ath10k_default_bitrate_mask(struct ath10k *ar, +			    enum ieee80211_band band, +			    const struct cfg80211_bitrate_mask *mask) +{ +	u32 legacy = 0x00ff; +	u8 ht = 0xff, i; +	u16 vht = 0x3ff; + +	switch (band) { +	case IEEE80211_BAND_2GHZ: +		legacy = 0x00fff; +		vht = 0; +		break; +	case IEEE80211_BAND_5GHZ: +		break; +	default: +		return false; +	} + +	if (mask->control[band].legacy != legacy) +		return false; + +	for (i = 0; i < ar->num_rf_chains; i++) +		if (mask->control[band].ht_mcs[i] != ht) +			return false; + +	for (i = 0; i < ar->num_rf_chains; i++) +		if (mask->control[band].vht_mcs[i] != vht) +			return false; + +	return true; +} + +static bool +ath10k_bitrate_mask_nss(const struct cfg80211_bitrate_mask *mask, +			enum ieee80211_band band, +			u8 *fixed_nss) +{ +	int ht_nss = 0, vht_nss = 0, i; + +	/* check legacy */ +	if (ath10k_check_single_mask(mask->control[band].legacy)) +		return false; + +	/* check HT */ +	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) { +		if (mask->control[band].ht_mcs[i] == 0xff) +			continue; +		else if (mask->control[band].ht_mcs[i] == 0x00) +			break; +		else +			return false; +	} + +	ht_nss = i; + +	/* check VHT */ +	for (i = 0; i < NL80211_VHT_NSS_MAX; i++) { +		if (mask->control[band].vht_mcs[i] == 0x03ff) +			continue; +		else if (mask->control[band].vht_mcs[i] == 0x0000) +			break; +		else +			return false; +	} + +	vht_nss = i; + +	if (ht_nss > 0 && vht_nss > 0) +		return false; + +	if (ht_nss) +		*fixed_nss = ht_nss; +	else if (vht_nss) +		*fixed_nss = vht_nss; +	else +		return false; + +	return true; +} + +static bool +ath10k_bitrate_mask_correct(const struct cfg80211_bitrate_mask *mask, +			    enum ieee80211_band band, +			    enum wmi_rate_preamble *preamble) +{ +	int legacy = 0, ht = 0, vht = 0, i; + +	*preamble = WMI_RATE_PREAMBLE_OFDM; + +	/* check legacy */ +	legacy = ath10k_check_single_mask(mask->control[band].legacy); +	if (legacy > 1) +		return false; + +	/* check HT */ +	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) +		ht += ath10k_check_single_mask(mask->control[band].ht_mcs[i]); +	if (ht > 1) +		return false; + +	/* check VHT */ +	for (i = 0; i < NL80211_VHT_NSS_MAX; i++) +		vht += ath10k_check_single_mask(mask->control[band].vht_mcs[i]); +	if (vht > 1) +		return 
false;
+
+	/* Currently we support only one fixed_rate */
+	if ((legacy + ht + vht) != 1)
+		return false;
+
+	if (ht)
+		*preamble = WMI_RATE_PREAMBLE_HT;
+	else if (vht)
+		*preamble = WMI_RATE_PREAMBLE_VHT;
+
+	return true;
+}
+
+static bool
+ath10k_bitrate_mask_rate(const struct cfg80211_bitrate_mask *mask,
+			 enum ieee80211_band band,
+			 u8 *fixed_rate,
+			 u8 *fixed_nss)
+{
+	u8 rate = 0, pream = 0, nss = 0, i;
+	enum wmi_rate_preamble preamble;
+
+	/* Check if the single rate is correct */
+	if (!ath10k_bitrate_mask_correct(mask, band, &preamble))
+		return false;
+
+	pream = preamble;
+
+	switch (preamble) {
+	case WMI_RATE_PREAMBLE_CCK:
+	case WMI_RATE_PREAMBLE_OFDM:
+		i = ffs(mask->control[band].legacy) - 1;
+
+		if (band == IEEE80211_BAND_2GHZ && i < 4)
+			pream = WMI_RATE_PREAMBLE_CCK;
+
+		if (band == IEEE80211_BAND_5GHZ)
+			i += 4;
+
+		if (i >= ARRAY_SIZE(cck_ofdm_rate))
+			return false;
+
+		rate = cck_ofdm_rate[i];
+		break;
+	case WMI_RATE_PREAMBLE_HT:
+		for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
+			if (mask->control[band].ht_mcs[i])
+				break;
+
+		if (i == IEEE80211_HT_MCS_MASK_LEN)
+			return false;
+
+		rate = ffs(mask->control[band].ht_mcs[i]) - 1;
+		nss = i;
+		break;
+	case WMI_RATE_PREAMBLE_VHT:
+		for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+			if (mask->control[band].vht_mcs[i])
+				break;
+
+		if (i == NL80211_VHT_NSS_MAX)
+			return false;
+
+		rate = ffs(mask->control[band].vht_mcs[i]) - 1;
+		nss = i;
+		break;
+	}
+
+	*fixed_nss = nss + 1;
+	nss <<= 4;
+	pream <<= 6;
+
+	ath10k_dbg(ATH10K_DBG_MAC, "mac fixed rate pream 0x%02x nss 0x%02x rate 0x%02x\n",
+		   pream, nss, rate);
+
+	*fixed_rate = pream | nss | rate;
+
+	return true;
+}
+
+static bool ath10k_get_fixed_rate_nss(const struct cfg80211_bitrate_mask *mask,
+				      enum ieee80211_band band,
+				      u8 *fixed_rate,
+				      u8 *fixed_nss)
+{
+	/* First check full NSS mask, if we can simply limit NSS */
+	if (ath10k_bitrate_mask_nss(mask, band, fixed_nss))
+		return true;
+
+	/* Next, check if a single rate is set */
+	return ath10k_bitrate_mask_rate(mask, band, fixed_rate, fixed_nss);
+}
+
+static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
+				       u8 fixed_rate,
+				       u8 fixed_nss,
+				       u8 force_sgi)
+{
+	struct ath10k *ar = arvif->ar;
+	u32 vdev_param;
+	int ret = 0;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (arvif->fixed_rate == fixed_rate &&
+	    arvif->fixed_nss == fixed_nss &&
+	    arvif->force_sgi == force_sgi)
+		goto exit;
+
+	if (fixed_rate == WMI_FIXED_RATE_NONE)
+		ath10k_dbg(ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n");
+
+	if (force_sgi)
+		ath10k_dbg(ATH10K_DBG_MAC, "mac force sgi\n");
+
+	vdev_param = ar->wmi.vdev_param->fixed_rate;
+	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+					vdev_param, fixed_rate);
+	if (ret) {
+		ath10k_warn("failed to set fixed rate param 0x%02x: %d\n",
+			    fixed_rate, ret);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	arvif->fixed_rate = fixed_rate;
+
+	vdev_param = ar->wmi.vdev_param->nss;
+	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+					vdev_param, fixed_nss);
+
+	if (ret) {
+		ath10k_warn("failed to set fixed nss param %d: %d\n",
+			    fixed_nss, ret);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	arvif->fixed_nss = fixed_nss;
+
+	vdev_param = ar->wmi.vdev_param->sgi;
+	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+					force_sgi);
+
+	if (ret) {
+		ath10k_warn("failed to set sgi param %d: %d\n",
+			    force_sgi, ret);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	arvif->force_sgi = force_sgi;
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
+				   struct ieee80211_vif *vif,
+				   const struct cfg80211_bitrate_mask *mask)
+{
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct ath10k *ar = arvif->ar;
+	enum ieee80211_band band = ar->hw->conf.chandef.chan->band;
+	u8 fixed_rate = WMI_FIXED_RATE_NONE;
+	u8 fixed_nss = ar->num_rf_chains;
+	u8 force_sgi;
+
+	force_sgi = mask->control[band].gi;
+	if (force_sgi == NL80211_TXRATE_FORCE_LGI)
+		return -EINVAL;
+
+	if (!ath10k_default_bitrate_mask(ar, band, mask)) {
+		if (!ath10k_get_fixed_rate_nss(mask, band,
+					       &fixed_rate,
+					       &fixed_nss))
+			return -EINVAL;
+	}
+
+	if (fixed_rate == WMI_FIXED_RATE_NONE && force_sgi) {
+		ath10k_warn("failed to force SGI usage for default rate settings\n");
+		return -EINVAL;
+	}
+
+	return ath10k_set_fixed_rate_param(arvif, fixed_rate,
+					   fixed_nss, force_sgi);
+}
+
+static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
+				 struct ieee80211_vif *vif,
+				 struct ieee80211_sta *sta,
+				 u32 changed)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+	u32 bw, smps;
+
+	spin_lock_bh(&ar->data_lock);
+
+	ath10k_dbg(ATH10K_DBG_MAC,
+		   "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
+		   sta->addr, changed, sta->bandwidth, sta->rx_nss,
+		   sta->smps_mode);
+
+	if (changed & IEEE80211_RC_BW_CHANGED) {
+		bw = WMI_PEER_CHWIDTH_20MHZ;
+
+		switch (sta->bandwidth) {
+		case IEEE80211_STA_RX_BW_20:
+			bw = WMI_PEER_CHWIDTH_20MHZ;
+			break;
+		case IEEE80211_STA_RX_BW_40:
+			bw = WMI_PEER_CHWIDTH_40MHZ;
+			break;
+		case IEEE80211_STA_RX_BW_80:
+			bw = WMI_PEER_CHWIDTH_80MHZ;
+			break;
+		case IEEE80211_STA_RX_BW_160:
+			ath10k_warn("Invalid bandwidth %d in rc update for %pM\n",
+				    sta->bandwidth, sta->addr);
+			bw = WMI_PEER_CHWIDTH_20MHZ;
+			break;
+		}
+
+		arsta->bw = bw;
+	}
+
+	if (changed & IEEE80211_RC_NSS_CHANGED)
+		arsta->nss = sta->rx_nss;
+
+	if (changed & IEEE80211_RC_SMPS_CHANGED) {
+		smps = WMI_PEER_SMPS_PS_NONE;
+
+		switch (sta->smps_mode) {
+		case IEEE80211_SMPS_AUTOMATIC:
+		case IEEE80211_SMPS_OFF:
+			smps = WMI_PEER_SMPS_PS_NONE;
+			break;
+		case IEEE80211_SMPS_STATIC:
+			smps = WMI_PEER_SMPS_STATIC;
+			break;
+		case IEEE80211_SMPS_DYNAMIC:
+			smps = WMI_PEER_SMPS_DYNAMIC;
+			break;
+		case IEEE80211_SMPS_NUM_MODES:
+			ath10k_warn("Invalid smps %d in sta rc update for %pM\n",
+				    sta->smps_mode, sta->addr);
+			smps = WMI_PEER_SMPS_PS_NONE;
+			break;
+		}
+
+		arsta->smps = smps;
+	}
+
+	arsta->changed |= changed;
+
+	spin_unlock_bh(&ar->data_lock);
+
+	ieee80211_queue_work(hw, &arsta->update_wk);
+}
+
+static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+	/*
+	 * FIXME: Return 0 for time being. Need to figure out whether FW
+	 * has the API to fetch 64-bit local TSF
+	 */
+
+	return 0;
+}
+
 static const struct ieee80211_ops ath10k_ops = {
 	.tx				= ath10k_tx,
 	.start				= ath10k_start,
@@ -2993,8 +4350,13 @@ static const struct ieee80211_ops ath10k_ops = {
 	.set_frag_threshold		= ath10k_set_frag_threshold,
 	.flush				= ath10k_flush,
 	.tx_last_beacon			= ath10k_tx_last_beacon,
+	.set_antenna			= ath10k_set_antenna,
+	.get_antenna			= ath10k_get_antenna,
 	.restart_complete		= ath10k_restart_complete,
 	.get_survey			= ath10k_get_survey,
+	.set_bitrate_mask		= ath10k_set_bitrate_mask,
+	.sta_rc_update			= ath10k_sta_rc_update,
+	.get_tsf			= ath10k_get_tsf,
 #ifdef CONFIG_PM
 	.suspend			= ath10k_suspend,
 	.resume				= ath10k_resume,
@@ -3127,12 +4489,37 @@ static const struct ieee80211_iface_limit ath10k_if_limits[] = {
 	},
 };
 
-static const struct ieee80211_iface_combination ath10k_if_comb = {
-	.limits = ath10k_if_limits,
-	.n_limits = ARRAY_SIZE(ath10k_if_limits),
-	.max_interfaces = 8,
-	.num_different_channels = 1,
-	.beacon_int_infra_match = true,
+static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = {
+	{
+	.max	= 8,
+	.types	= BIT(NL80211_IFTYPE_AP)
+	},
+};
+
+static const struct ieee80211_iface_combination ath10k_if_comb[] = {
+	{
+		.limits = ath10k_if_limits,
+		.n_limits = ARRAY_SIZE(ath10k_if_limits),
+		.max_interfaces = 8,
+		.num_different_channels = 1,
+		.beacon_int_infra_match = true,
+	},
+};
+
+static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
+	{
+		.limits = ath10k_10x_if_limits,
+		.n_limits = ARRAY_SIZE(ath10k_10x_if_limits),
+		.max_interfaces = 8,
+		.num_different_channels = 1,
+		.beacon_int_infra_match = true,
+#ifdef CONFIG_ATH10K_DFS_CERTIFIED
+		.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+					BIT(NL80211_CHAN_WIDTH_20) |
+					BIT(NL80211_CHAN_WIDTH_40) |
+					BIT(NL80211_CHAN_WIDTH_80),
+#endif
+	},
 };
 
 static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
@@ -3246,7 +4633,7 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
 						   ath10k_get_arvif_iter,
 						   &arvif_iter);
 	if (!arvif_iter.arvif) {
-		ath10k_warn("No VIF found for VDEV: %d\n", vdev_id);
+		ath10k_warn("No VIF found for vdev %d\n", vdev_id);
 		return NULL;
 	}
@@ -3311,9 +4698,24 @@ int ath10k_mac_register(struct ath10k *ar)
 	ar->hw->wiphy->interface_modes =
 		BIT(NL80211_IFTYPE_STATION) |
 		BIT(NL80211_IFTYPE_ADHOC) |
-		BIT(NL80211_IFTYPE_AP) |
-		BIT(NL80211_IFTYPE_P2P_CLIENT) |
-		BIT(NL80211_IFTYPE_P2P_GO);
+		BIT(NL80211_IFTYPE_AP);
+
+	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+		/* TODO: Have to deal with 2x2 chips if/when they come out. 
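
[Editorial aside] The bitrate-mask helpers above boil a cfg80211 mask down to one WMI fixed-rate byte: preamble in bits 7:6, NSS index in bits 5:4, rate/MCS index in bits 3:0. The stand-alone sketch below reproduces the ffs()-based single-bit test and the packing step; the preamble numbering (OFDM=0, CCK=1, HT=2, VHT=3) mirrors what enum wmi_rate_preamble is believed to define and is an assumption here, not something visible in this hunk.

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Same classification as ath10k_check_single_mask():
 * 0 = empty mask, 1 = exactly one bit set, 2 = more than one bit. */
static int check_single_mask(unsigned int mask)
{
	int bit = ffs(mask);

	if (!bit)
		return 0;

	mask &= ~(1u << (bit - 1));
	return mask ? 2 : 1;
}

/* Pack preamble/nss/rate the way ath10k_bitrate_mask_rate() does:
 * (pream << 6) | (nss << 4) | rate. */
static unsigned char pack_fixed_rate(unsigned int pream, unsigned int nss,
				     unsigned int rate)
{
	return (unsigned char)((pream << 6) | (nss << 4) | (rate & 0xf));
}

int main(void)
{
	/* HT (assumed preamble value 2), MCS 7, first spatial stream. */
	printf("fixed_rate 0x%02x\n", pack_fixed_rate(2, 0, 7));
	printf("single-bit? %d %d %d\n",
	       check_single_mask(0x0), check_single_mask(0x40),
	       check_single_mask(0x41));
	return 0;
}
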
*/ +		ar->supp_tx_chainmask = TARGET_10X_TX_CHAIN_MASK; +		ar->supp_rx_chainmask = TARGET_10X_RX_CHAIN_MASK; +	} else { +		ar->supp_tx_chainmask = TARGET_TX_CHAIN_MASK; +		ar->supp_rx_chainmask = TARGET_RX_CHAIN_MASK; +	} + +	ar->hw->wiphy->available_antennas_rx = ar->supp_rx_chainmask; +	ar->hw->wiphy->available_antennas_tx = ar->supp_tx_chainmask; + +	if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features)) +		ar->hw->wiphy->interface_modes |= +			BIT(NL80211_IFTYPE_P2P_CLIENT) | +			BIT(NL80211_IFTYPE_P2P_GO);  	ar->hw->flags = IEEE80211_HW_SIGNAL_DBM |  			IEEE80211_HW_SUPPORTS_PS | @@ -3323,8 +4725,12 @@ int ath10k_mac_register(struct ath10k *ar)  			IEEE80211_HW_REPORTS_TX_ACK_STATUS |  			IEEE80211_HW_HAS_RATE_CONTROL |  			IEEE80211_HW_SUPPORTS_STATIC_SMPS | -			IEEE80211_HW_WANT_MONITOR_VIF | -			IEEE80211_HW_AP_LINK_PS; +			IEEE80211_HW_AP_LINK_PS | +			IEEE80211_HW_SPECTRUM_MGMT; + +	/* MSDU can have HTT TX fragment pushed in front. The additional 4 +	 * bytes is used for padding/alignment if necessary. */ +	ar->hw->extra_tx_headroom += sizeof(struct htt_data_tx_desc_frag)*2 + 4;  	if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)  		ar->hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS; @@ -3338,11 +4744,12 @@ int ath10k_mac_register(struct ath10k *ar)  	ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;  	ar->hw->vif_data_size = sizeof(struct ath10k_vif); +	ar->hw->sta_data_size = sizeof(struct ath10k_sta); -	ar->hw->channel_change_time = 5000;  	ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;  	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; +	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;  	ar->hw->wiphy->max_remain_on_channel_duration = 5000;  	ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; @@ -3352,21 +4759,38 @@ int ath10k_mac_register(struct ath10k *ar)  	 */  	ar->hw->queues = 4; -	ar->hw->wiphy->iface_combinations = &ath10k_if_comb; -	ar->hw->wiphy->n_iface_combinations = 1; +	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { +		ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb; +		ar->hw->wiphy->n_iface_combinations = +			ARRAY_SIZE(ath10k_10x_if_comb); +	} else { +		ar->hw->wiphy->iface_combinations = ath10k_if_comb; +		ar->hw->wiphy->n_iface_combinations = +			ARRAY_SIZE(ath10k_if_comb); +	}  	ar->hw->netdev_features = NETIF_F_HW_CSUM; +	if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) { +		/* Init ath dfs pattern detector */ +		ar->ath_common.debug_mask = ATH_DBG_DFS; +		ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common, +							     NL80211_DFS_UNSET); + +		if (!ar->dfs_detector) +			ath10k_warn("failed to initialise DFS pattern detector\n"); +	} +  	ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,  			    ath10k_reg_notifier);  	if (ret) { -		ath10k_err("Regulatory initialization failed\n"); +		ath10k_err("failed to initialise regulatory: %i\n", ret);  		goto err_free;  	}  	ret = ieee80211_register_hw(ar->hw);  	if (ret) { -		ath10k_err("ieee80211 registration failed: %d\n", ret); +		ath10k_err("failed to register ieee80211: %d\n", ret);  		goto err_free;  	} @@ -3392,6 +4816,9 @@ void ath10k_mac_unregister(struct ath10k *ar)  {  	ieee80211_unregister_hw(ar->hw); +	if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) +		ar->dfs_detector->exit(ar->dfs_detector); +  	kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);  	kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels); diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h index 
6fce9bfb19a..ba1021997b8 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -34,6 +34,8 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id);
 void ath10k_reset_scan(unsigned long ptr);
 void ath10k_offchan_tx_purge(struct ath10k *ar);
 void ath10k_offchan_tx_work(struct work_struct *work);
+void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar);
+void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work);
 void ath10k_halt(struct ath10k *ar);
 
 static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index e2f9ef50b1b..d0004d59c97 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
+#include <linux/bitops.h>
 
 #include "core.h"
 #include "debug.h"
@@ -32,15 +33,37 @@
 #include "ce.h"
 #include "pci.h"
 
-static unsigned int ath10k_target_ps;
-module_param(ath10k_target_ps, uint, 0644);
-MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
+enum ath10k_pci_irq_mode {
+	ATH10K_PCI_IRQ_AUTO = 0,
+	ATH10K_PCI_IRQ_LEGACY = 1,
+	ATH10K_PCI_IRQ_MSI = 2,
+};
+
+enum ath10k_pci_reset_mode {
+	ATH10K_PCI_RESET_AUTO = 0,
+	ATH10K_PCI_RESET_WARM_ONLY = 1,
+};
+
+static unsigned int ath10k_pci_target_ps;
+static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
+static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
+
+module_param_named(target_ps, ath10k_pci_target_ps, uint, 0644);
+MODULE_PARM_DESC(target_ps, "Enable ath10k Target (SoC) PS option");
+
+module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
+MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
+
+module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
+MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
+
+/* how long to wait for the target to initialise, in ms */
+#define ATH10K_PCI_TARGET_WAIT 3000
+#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
 
-#define QCA988X_1_0_DEVICE_ID	(0xabcd)
 #define QCA988X_2_0_DEVICE_ID	(0x003c)
 
 static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
-	{ PCI_VDEVICE(ATHEROS, QCA988X_1_0_DEVICE_ID) }, /* PCI-E QCA988X V1 */
 	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
 	{0}
 };
@@ -48,58 +71,245 @@ static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
 
 static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
 				       u32 *data);
 
-static void ath10k_pci_process_ce(struct ath10k *ar);
 static int ath10k_pci_post_rx(struct ath10k *ar);
-static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
+static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
 					     int num);
-static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
-static void ath10k_pci_stop_ce(struct ath10k *ar);
-static void ath10k_pci_device_reset(struct ath10k *ar);
-static int ath10k_pci_reset_target(struct ath10k *ar);
-static int ath10k_pci_start_intr(struct ath10k *ar);
-static void ath10k_pci_stop_intr(struct ath10k *ar);
+static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
+static int ath10k_pci_cold_reset(struct ath10k *ar);
+static int ath10k_pci_warm_reset(struct ath10k *ar);
+static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
+static int ath10k_pci_init_irq(struct ath10k *ar);
+static int 
ath10k_pci_deinit_irq(struct ath10k *ar); +static int ath10k_pci_request_irq(struct ath10k *ar); +static void ath10k_pci_free_irq(struct ath10k *ar); +static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe, +			       struct ath10k_ce_pipe *rx_pipe, +			       struct bmi_xfer *xfer);  static const struct ce_attr host_ce_config_wlan[] = { -	/* host->target HTC control and raw streams */ -	{ /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,}, -	/* could be moved to share CE3 */ -	/* target->host HTT + HTC control */ -	{ /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL,}, -	/* target->host WMI */ -	{ /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,}, -	/* host->target WMI */ -	{ /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,}, -	/* host->target HTT */ -	{ /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 0, -		    CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,}, -	/* unused */ -	{ /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, -	/* Target autonomous hif_memcpy */ -	{ /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, -	/* ce_diag, the Diagnostic Window */ -	{ /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,}, +	/* CE0: host->target HTC control and raw streams */ +	{ +		.flags = CE_ATTR_FLAGS, +		.src_nentries = 16, +		.src_sz_max = 256, +		.dest_nentries = 0, +	}, + +	/* CE1: target->host HTT + HTC control */ +	{ +		.flags = CE_ATTR_FLAGS, +		.src_nentries = 0, +		.src_sz_max = 512, +		.dest_nentries = 512, +	}, + +	/* CE2: target->host WMI */ +	{ +		.flags = CE_ATTR_FLAGS, +		.src_nentries = 0, +		.src_sz_max = 2048, +		.dest_nentries = 32, +	}, + +	/* CE3: host->target WMI */ +	{ +		.flags = CE_ATTR_FLAGS, +		.src_nentries = 32, +		.src_sz_max = 2048, +		.dest_nentries = 0, +	}, + +	/* CE4: host->target HTT */ +	{ +		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, +		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES, +		.src_sz_max = 256, +		.dest_nentries = 0, +	}, + +	/* CE5: unused */ +	{ +		.flags = CE_ATTR_FLAGS, +		.src_nentries = 0, +		.src_sz_max = 0, +		.dest_nentries = 0, +	}, + +	/* CE6: target autonomous hif_memcpy */ +	{ +		.flags = CE_ATTR_FLAGS, +		.src_nentries = 0, +		.src_sz_max = 0, +		.dest_nentries = 0, +	}, + +	/* CE7: ce_diag, the Diagnostic Window */ +	{ +		.flags = CE_ATTR_FLAGS, +		.src_nentries = 2, +		.src_sz_max = DIAG_TRANSFER_LIMIT, +		.dest_nentries = 2, +	},  };  /* Target firmware's Copy Engine configuration. 
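
[Editorial aside] Both CE tables are rewritten here from positional to designated initializers. Beyond readability, designated initializers zero-fill any member that is not mentioned and survive struct-layout reordering, which matters for tables that must stay in sync with a header. A stand-alone illustration, using an invented stand-in struct rather than the real ce_attr:

#include <stdio.h>

/* Hypothetical stand-in for struct ce_attr. */
struct attr {
	unsigned int flags;
	unsigned int src_nentries;
	unsigned int src_sz_max;
	unsigned int dest_nentries;
};

static const struct attr table[] = {
	/* positional: every field must appear in declaration order */
	{ 0, 16, 256, 0 },
	/* designated: order-independent; omitted fields become zero */
	{
		.flags = 0,
		.src_nentries = 16,
		.src_sz_max = 256,
	},
};

int main(void)
{
	/* .dest_nentries was omitted above and is implicitly zero. */
	printf("dest_nentries = %u\n", table[1].dest_nentries);
	return 0;
}
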
*/  static const struct ce_pipe_config target_ce_config_wlan[] = { -	/* host->target HTC control and raw streams */ -	{ /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,}, -	/* target->host HTT + HTC control */ -	{ /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,}, -	/* target->host WMI */ -	{ /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, -	/* host->target WMI */ -	{ /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, -	/* host->target HTT */ -	{ /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,}, +	/* CE0: host->target HTC control and raw streams */ +	{ +		.pipenum = 0, +		.pipedir = PIPEDIR_OUT, +		.nentries = 32, +		.nbytes_max = 256, +		.flags = CE_ATTR_FLAGS, +		.reserved = 0, +	}, + +	/* CE1: target->host HTT + HTC control */ +	{ +		.pipenum = 1, +		.pipedir = PIPEDIR_IN, +		.nentries = 32, +		.nbytes_max = 512, +		.flags = CE_ATTR_FLAGS, +		.reserved = 0, +	}, + +	/* CE2: target->host WMI */ +	{ +		.pipenum = 2, +		.pipedir = PIPEDIR_IN, +		.nentries = 32, +		.nbytes_max = 2048, +		.flags = CE_ATTR_FLAGS, +		.reserved = 0, +	}, + +	/* CE3: host->target WMI */ +	{ +		.pipenum = 3, +		.pipedir = PIPEDIR_OUT, +		.nentries = 32, +		.nbytes_max = 2048, +		.flags = CE_ATTR_FLAGS, +		.reserved = 0, +	}, + +	/* CE4: host->target HTT */ +	{ +		.pipenum = 4, +		.pipedir = PIPEDIR_OUT, +		.nentries = 256, +		.nbytes_max = 256, +		.flags = CE_ATTR_FLAGS, +		.reserved = 0, +	}, +  	/* NB: 50% of src nentries, since tx has 2 frags */ -	/* unused */ -	{ /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, -	/* Reserved for target autonomous hif_memcpy */ -	{ /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,}, + +	/* CE5: unused */ +	{ +		.pipenum = 5, +		.pipedir = PIPEDIR_OUT, +		.nentries = 32, +		.nbytes_max = 2048, +		.flags = CE_ATTR_FLAGS, +		.reserved = 0, +	}, + +	/* CE6: Reserved for target autonomous hif_memcpy */ +	{ +		.pipenum = 6, +		.pipedir = PIPEDIR_INOUT, +		.nentries = 32, +		.nbytes_max = 4096, +		.flags = CE_ATTR_FLAGS, +		.reserved = 0, +	}, +  	/* CE7 used only by Host */  }; +static bool ath10k_pci_irq_pending(struct ath10k *ar) +{ +	u32 cause; + +	/* Check if the shared legacy irq is for us */ +	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + +				  PCIE_INTR_CAUSE_ADDRESS); +	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL)) +		return true; + +	return false; +} + +static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar) +{ +	/* IMPORTANT: INTR_CLR register has to be set after +	 * INTR_ENABLE is set to 0, otherwise interrupt can not be +	 * really cleared. */ +	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, +			   0); +	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS, +			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); + +	/* IMPORTANT: this extra read transaction is required to +	 * flush the posted write buffer. */ +	(void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + +				 PCIE_INTR_ENABLE_ADDRESS); +} + +static void ath10k_pci_enable_legacy_irq(struct ath10k *ar) +{ +	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + +			   PCIE_INTR_ENABLE_ADDRESS, +			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); + +	/* IMPORTANT: this extra read transaction is required to +	 * flush the posted write buffer. 
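
[Editorial aside] ath10k_pci_disable_and_clear_legacy_irq() above bakes in two ordering rules: INTR_ENABLE must be written to 0 before INTR_CLR is touched, and a register must be read back afterwards so the PCIe posted-write buffer is flushed before execution continues. A toy model of the pattern, with an invented in-memory register file standing in for MMIO:

#include <stdio.h>
#include <stdint.h>

/* Invented "register file"; indices and semantics are illustrative only. */
enum { INTR_ENABLE, INTR_CLR, NUM_REGS };
static uint32_t regs[NUM_REGS];

static void write32(int reg, uint32_t val) { regs[reg] = val; }
static uint32_t read32(int reg) { return regs[reg]; }

static void disable_and_clear_irq(void)
{
	/* 1. Disable first: clearing while still enabled may not stick. */
	write32(INTR_ENABLE, 0);

	/* 2. Ack everything that is pending. */
	write32(INTR_CLR, ~0u);

	/* 3. Dummy read-back: a no-op in this toy model, but on a real
	 * PCIe bus it forces the posted writes above out on the wire. */
	(void)read32(INTR_ENABLE);
}

int main(void)
{
	write32(INTR_ENABLE, 0x3);
	disable_and_clear_irq();
	printf("enable=0x%x\n", (unsigned)read32(INTR_ENABLE));
	return 0;
}
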
*/ +	(void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + +				 PCIE_INTR_ENABLE_ADDRESS); +} + +static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg) +{ +	struct ath10k *ar = arg; +	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + +	if (ar_pci->num_msi_intrs == 0) { +		if (!ath10k_pci_irq_pending(ar)) +			return IRQ_NONE; + +		ath10k_pci_disable_and_clear_legacy_irq(ar); +	} + +	tasklet_schedule(&ar_pci->early_irq_tasklet); + +	return IRQ_HANDLED; +} + +static int ath10k_pci_request_early_irq(struct ath10k *ar) +{ +	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); +	int ret; + +	/* Regardless whether MSI-X/MSI/legacy irqs have been set up the first +	 * interrupt from irq vector is triggered in all cases for FW +	 * indication/errors */ +	ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler, +			  IRQF_SHARED, "ath10k_pci (early)", ar); +	if (ret) { +		ath10k_warn("failed to request early irq: %d\n", ret); +		return ret; +	} + +	return 0; +} + +static void ath10k_pci_free_early_irq(struct ath10k *ar) +{ +	free_irq(ath10k_pci_priv(ar)->pdev->irq, ar); +} +  /*   * Diagnostic read/write access is provided for startup/config/debug usage.   * Caller must guarantee proper alignment, when applicable, and single user @@ -114,7 +324,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,  	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;  	unsigned int id;  	unsigned int flags; -	struct ce_state *ce_diag; +	struct ath10k_ce_pipe *ce_diag;  	/* Host buffer address in CE space */  	u32 ce_data;  	dma_addr_t ce_data_base = 0; @@ -149,9 +359,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,  	 *   2) Buffer in DMA-able space  	 */  	orig_nbytes = nbytes; -	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev, -							 orig_nbytes, -							 &ce_data_base); +	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, +						       orig_nbytes, +						       &ce_data_base, +						       GFP_ATOMIC);  	if (!data_buf) {  		ret = -ENOMEM; @@ -245,12 +456,12 @@ done:  				__le32_to_cpu(((__le32 *)data_buf)[i]);  		}  	} else -		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", -			   __func__, address); +		ath10k_warn("failed to read diag value at 0x%x: %d\n", +			    address, ret);  	if (data_buf) -		pci_free_consistent(ar_pci->pdev, orig_nbytes, -				    data_buf, ce_data_base); +		dma_free_coherent(ar->dev, orig_nbytes, data_buf, +				  ce_data_base);  	return ret;  } @@ -278,7 +489,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,  	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;  	unsigned int id;  	unsigned int flags; -	struct ce_state *ce_diag; +	struct ath10k_ce_pipe *ce_diag;  	void *data_buf = NULL;  	u32 ce_data;	/* Host buffer address in CE space */  	dma_addr_t ce_data_base = 0; @@ -293,9 +504,10 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,  	 *   2) Buffer in DMA-able space  	 */  	orig_nbytes = nbytes; -	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev, -							 orig_nbytes, -							 &ce_data_base); +	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, +						       orig_nbytes, +						       &ce_data_base, +						       GFP_ATOMIC);  	if (!data_buf) {  		ret = -ENOMEM;  		goto done; @@ -391,13 +603,13 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,  done:  	if (data_buf) { -		pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf, -				    ce_data_base); +		
dma_free_coherent(ar->dev, orig_nbytes, data_buf,
+				  ce_data_base);
 	}
 
 	if (ret != 0)
-		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
-			   address);
+		ath10k_warn("failed to write diag value at 0x%x: %d\n",
+			    address, ret);
 
 	return ret;
 }
@@ -426,18 +638,7 @@ static bool ath10k_pci_target_is_awake(struct ath10k *ar)
 	return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
 }
 
-static void ath10k_pci_wait(struct ath10k *ar)
-{
-	int n = 100;
-
-	while (n-- && !ath10k_pci_target_is_awake(ar))
-		msleep(10);
-
-	if (n < 0)
-		ath10k_warn("Unable to wakeup target\n");
-}
-
-void ath10k_do_pci_wake(struct ath10k *ar)
+int ath10k_do_pci_wake(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	void __iomem *pci_addr = ar_pci->mem;
@@ -453,18 +654,19 @@ void ath10k_do_pci_wake(struct ath10k *ar)
 	atomic_inc(&ar_pci->keep_awake_count);
 
 	if (ar_pci->verified_awake)
-		return;
+		return 0;
 
 	for (;;) {
 		if (ath10k_pci_target_is_awake(ar)) {
 			ar_pci->verified_awake = true;
-			break;
+			return 0;
 		}
 
 		if (tot_delay > PCIE_WAKE_TIMEOUT) {
-			ath10k_warn("target takes too long to wake up (awake count %d)\n",
+			ath10k_warn("target took longer than %d us to wake up (awake count %d)\n",
+				    PCIE_WAKE_TIMEOUT,
 				    atomic_read(&ar_pci->keep_awake_count));
-			break;
+			return -ETIMEDOUT;
 		}
 
 		udelay(curr_delay);
@@ -489,195 +691,146 @@ void ath10k_do_pci_sleep(struct ath10k *ar)
 	}
 }
 
-/*
- * FIXME: Handle OOM properly.
- */
-static inline
-struct ath10k_pci_compl *get_free_compl(struct hif_ce_pipe_info *pipe_info)
-{
-	struct ath10k_pci_compl *compl = NULL;
-
-	spin_lock_bh(&pipe_info->pipe_lock);
-	if (list_empty(&pipe_info->compl_free)) {
-		ath10k_warn("Completion buffers are full\n");
-		goto exit;
-	}
-	compl = list_first_entry(&pipe_info->compl_free,
-				 struct ath10k_pci_compl, list);
-	list_del(&compl->list);
-exit:
-	spin_unlock_bh(&pipe_info->pipe_lock);
-	return compl;
-}
-
 /* Called by lower (CE) layer when a send to Target completes. */
-static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
-				    void *transfer_context,
-				    u32 ce_data,
-				    unsigned int nbytes,
-				    unsigned int transfer_id)
+static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
 	struct ath10k *ar = ce_state->ar;
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct hif_ce_pipe_info *pipe_info =  &ar_pci->pipe_info[ce_state->id];
-	struct ath10k_pci_compl *compl;
-	bool process = false;
+	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
+	void *transfer_context;
+	u32 ce_data;
+	unsigned int nbytes;
+	unsigned int transfer_id;
 
-	do {
-		/*
-		 * For the send completion of an item in sendlist, just
-		 * increment num_sends_allowed. The upper layer callback will
-		 * be triggered when last fragment is done with send. 
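
[Editorial aside] The old ath10k_pci_wait() polled and merely warned on failure; the reworked ath10k_do_pci_wake() above bounds the poll and propagates -ETIMEDOUT so callers can fail cleanly. The loop shape, sketched in plain C with a simulated awake predicate (the delay-growth constants below are assumptions, not copied from the driver):

#include <stdio.h>
#include <errno.h>

#define WAKE_TIMEOUT_US 5000

/* Simulated target: reports awake on the fourth poll. */
static int polls;
static int target_is_awake(void) { return ++polls > 3; }

static int do_wake(void)
{
	int curr_delay = 5;	/* microseconds, grows up to a cap */
	int tot_delay = 0;

	for (;;) {
		if (target_is_awake())
			return 0;

		if (tot_delay > WAKE_TIMEOUT_US)
			return -ETIMEDOUT;	/* caller can now bail out */

		/* udelay(curr_delay) would sit here on real hardware. */
		tot_delay += curr_delay;
		if (curr_delay < 50)
			curr_delay += 5;
	}
}

int main(void)
{
	printf("wake: %d (polled %d times)\n", do_wake(), polls);
	return 0;
}
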
-		 */ -		if (transfer_context == CE_SENDLIST_ITEM_CTXT) { -			spin_lock_bh(&pipe_info->pipe_lock); -			pipe_info->num_sends_allowed++; -			spin_unlock_bh(&pipe_info->pipe_lock); +	while (ath10k_ce_completed_send_next(ce_state, &transfer_context, +					     &ce_data, &nbytes, +					     &transfer_id) == 0) { +		/* no need to call tx completion for NULL pointers */ +		if (transfer_context == NULL)  			continue; -		} - -		compl = get_free_compl(pipe_info); -		if (!compl) -			break; - -		compl->send_or_recv = HIF_CE_COMPLETE_SEND; -		compl->ce_state = ce_state; -		compl->pipe_info = pipe_info; -		compl->transfer_context = transfer_context; -		compl->nbytes = nbytes; -		compl->transfer_id = transfer_id; -		compl->flags = 0; - -		/* -		 * Add the completion to the processing queue. -		 */ -		spin_lock_bh(&ar_pci->compl_lock); -		list_add_tail(&compl->list, &ar_pci->compl_process); -		spin_unlock_bh(&ar_pci->compl_lock); - -		process = true; -	} while (ath10k_ce_completed_send_next(ce_state, -							   &transfer_context, -							   &ce_data, &nbytes, -							   &transfer_id) == 0); -	/* -	 * If only some of the items within a sendlist have completed, -	 * don't invoke completion processing until the entire sendlist -	 * has been sent. -	 */ -	if (!process) -		return; - -	ath10k_pci_process_ce(ar); +		cb->tx_completion(ar, transfer_context, transfer_id); +	}  }  /* Called by lower (CE) layer when data is received from the Target. */ -static void ath10k_pci_ce_recv_data(struct ce_state *ce_state, -				    void *transfer_context, u32 ce_data, -				    unsigned int nbytes, -				    unsigned int transfer_id, -				    unsigned int flags) +static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)  {  	struct ath10k *ar = ce_state->ar;  	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); -	struct hif_ce_pipe_info *pipe_info =  &ar_pci->pipe_info[ce_state->id]; -	struct ath10k_pci_compl *compl; +	struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id]; +	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;  	struct sk_buff *skb; - -	do { -		compl = get_free_compl(pipe_info); -		if (!compl) -			break; - -		compl->send_or_recv = HIF_CE_COMPLETE_RECV; -		compl->ce_state = ce_state; -		compl->pipe_info = pipe_info; -		compl->transfer_context = transfer_context; -		compl->nbytes = nbytes; -		compl->transfer_id = transfer_id; -		compl->flags = flags; +	void *transfer_context; +	u32 ce_data; +	unsigned int nbytes, max_nbytes; +	unsigned int transfer_id; +	unsigned int flags; +	int err; + +	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context, +					     &ce_data, &nbytes, &transfer_id, +					     &flags) == 0) { +		err = ath10k_pci_post_rx_pipe(pipe_info, 1); +		if (unlikely(err)) { +			/* FIXME: retry */ +			ath10k_warn("failed to replenish CE rx ring %d: %d\n", +				    pipe_info->pipe_num, err); +		}  		skb = transfer_context; +		max_nbytes = skb->len + skb_tailroom(skb);  		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr, -				 skb->len + skb_tailroom(skb), -				 DMA_FROM_DEVICE); -		/* -		 * Add the completion to the processing queue. 
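
[Editorial aside] The completion rework visible here is the heart of this hunk: instead of queueing ath10k_pci_compl objects for deferred processing, the CE callbacks now drain completions on the spot, looping while completed_send_next()/completed_recv_next() keeps returning 0 and skipping NULL contexts. A minimal model of that consumer contract over a toy ring:

#include <stdio.h>

/* Toy completion ring; NULL means "internal transfer, no callback". */
static void *ring[] = { "pkt0", NULL, "pkt1" };
static unsigned int head;

/* Pops one entry and returns 0 while completions remain, else -1,
 * mirroring the assumed ath10k_ce_completed_send_next() contract. */
static int completed_next(void **ctx)
{
	if (head >= sizeof(ring) / sizeof(ring[0]))
		return -1;
	*ctx = ring[head++];
	return 0;
}

static void tx_completion(void *ctx)
{
	printf("completed %s\n", (const char *)ctx);
}

static void ce_send_done(void)
{
	void *ctx;

	while (completed_next(&ctx) == 0) {
		if (ctx == NULL)	/* no callback for NULL pointers */
			continue;
		tx_completion(ctx);
	}
}

int main(void)
{
	ce_send_done();
	return 0;
}
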
-		 */ -		spin_lock_bh(&ar_pci->compl_lock); -		list_add_tail(&compl->list, &ar_pci->compl_process); -		spin_unlock_bh(&ar_pci->compl_lock); +				 max_nbytes, DMA_FROM_DEVICE); -	} while (ath10k_ce_completed_recv_next(ce_state, -							   &transfer_context, -							   &ce_data, &nbytes, -							   &transfer_id, -							   &flags) == 0); +		if (unlikely(max_nbytes < nbytes)) { +			ath10k_warn("rxed more than expected (nbytes %d, max %d)", +				    nbytes, max_nbytes); +			dev_kfree_skb_any(skb); +			continue; +		} -	ath10k_pci_process_ce(ar); +		skb_put(skb, nbytes); +		cb->rx_completion(ar, skb, pipe_info->pipe_num); +	}  } -/* Send the first nbytes bytes of the buffer */ -static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id, -				    unsigned int transfer_id, -				    unsigned int bytes, struct sk_buff *nbuf) +static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, +				struct ath10k_hif_sg_item *items, int n_items)  { -	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);  	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); -	struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe_id]); -	struct ce_state *ce_hdl = pipe_info->ce_hdl; -	struct ce_sendlist sendlist; -	unsigned int len; -	u32 flags = 0; -	int ret; - -	memset(&sendlist, 0, sizeof(struct ce_sendlist)); +	struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id]; +	struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl; +	struct ath10k_ce_ring *src_ring = ce_pipe->src_ring; +	unsigned int nentries_mask; +	unsigned int sw_index; +	unsigned int write_index; +	int err, i = 0; + +	spin_lock_bh(&ar_pci->ce_lock); + +	nentries_mask = src_ring->nentries_mask; +	sw_index = src_ring->sw_index; +	write_index = src_ring->write_index; + +	if (unlikely(CE_RING_DELTA(nentries_mask, +				   write_index, sw_index - 1) < n_items)) { +		err = -ENOBUFS; +		goto err; +	} -	len = min(bytes, nbuf->len); -	bytes -= len; +	for (i = 0; i < n_items - 1; i++) { +		ath10k_dbg(ATH10K_DBG_PCI, +			   "pci tx item %d paddr 0x%08x len %d n_items %d\n", +			   i, items[i].paddr, items[i].len, n_items); +		ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ", +				items[i].vaddr, items[i].len); + +		err = ath10k_ce_send_nolock(ce_pipe, +					    items[i].transfer_context, +					    items[i].paddr, +					    items[i].len, +					    items[i].transfer_id, +					    CE_SEND_FLAG_GATHER); +		if (err) +			goto err; +	} -	if (len & 3) -		ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len); +	/* `i` is equal to `n_items -1` after for() */  	ath10k_dbg(ATH10K_DBG_PCI, -		   "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n", -		   nbuf->data, (unsigned long long) skb_cb->paddr, -		   nbuf->len, len); -	ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, -			"ath10k tx: data: ", -			nbuf->data, nbuf->len); - -	ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags); - -	/* Make sure we have resources to handle this request */ -	spin_lock_bh(&pipe_info->pipe_lock); -	if (!pipe_info->num_sends_allowed) { -		ath10k_warn("Pipe: %d is full\n", pipe_id); -		spin_unlock_bh(&pipe_info->pipe_lock); -		return -ENOSR; -	} -	pipe_info->num_sends_allowed--; -	spin_unlock_bh(&pipe_info->pipe_lock); - -	ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id); -	if (ret) -		ath10k_warn("CE send failed: %p\n", nbuf); +		   "pci tx item %d paddr 0x%08x len %d n_items %d\n", +		   i, items[i].paddr, items[i].len, n_items); +	ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ", +			items[i].vaddr, items[i].len); + +	err = 
ath10k_ce_send_nolock(ce_pipe, +				    items[i].transfer_context, +				    items[i].paddr, +				    items[i].len, +				    items[i].transfer_id, +				    0); +	if (err) +		goto err; -	return ret; +	spin_unlock_bh(&ar_pci->ce_lock); +	return 0; + +err: +	for (; i > 0; i--) +		__ath10k_ce_send_revert(ce_pipe); + +	spin_unlock_bh(&ar_pci->ce_lock); +	return err;  }  static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)  {  	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); -	struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe]); -	int ret; -	spin_lock_bh(&pipe_info->pipe_lock); -	ret = pipe_info->num_sends_allowed; -	spin_unlock_bh(&pipe_info->pipe_lock); +	ath10k_dbg(ATH10K_DBG_PCI, "pci hif get free queue number\n"); -	return ret; +	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);  }  static void ath10k_pci_hif_dump_area(struct ath10k *ar) @@ -691,14 +844,13 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)  	ath10k_err("firmware crashed!\n");  	ath10k_err("hardware name %s version 0x%x\n",  		   ar->hw_params.name, ar->target_version); -	ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major, -		   ar->fw_version_minor, ar->fw_version_release, -		   ar->fw_version_build); +	ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version);  	host_addr = host_interest_item_address(HI_ITEM(hi_failure_state)); -	if (ath10k_pci_diag_read_mem(ar, host_addr, -				     ®_dump_area, sizeof(u32)) != 0) { -		ath10k_warn("could not read hi_failure_state\n"); +	ret = ath10k_pci_diag_read_mem(ar, host_addr, +				       ®_dump_area, sizeof(u32)); +	if (ret) { +		ath10k_err("failed to read FW dump area address: %d\n", ret);  		return;  	} @@ -708,7 +860,7 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)  				       ®_dump_values[0],  				       REG_DUMP_COUNT_QCA988X * sizeof(u32));  	if (ret != 0) { -		ath10k_err("could not dump FW Dump Area\n"); +		ath10k_err("failed to read FW dump area: %d\n", ret);  		return;  	} @@ -723,12 +875,14 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)  			   reg_dump_values[i + 2],  			   reg_dump_values[i + 3]); -	ieee80211_queue_work(ar->hw, &ar->restart_work); +	queue_work(ar->workqueue, &ar->restart_work);  }  static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,  					       int force)  { +	ath10k_dbg(ATH10K_DBG_PCI, "pci hif send complete check\n"); +  	if (!force) {  		int resources;  		/* @@ -755,211 +909,54 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,  {  	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); -	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); +	ath10k_dbg(ATH10K_DBG_PCI, "pci hif set callbacks\n");  	memcpy(&ar_pci->msg_callbacks_current, callbacks,  	       sizeof(ar_pci->msg_callbacks_current));  } -static int ath10k_pci_start_ce(struct ath10k *ar) +static int ath10k_pci_setup_ce_irq(struct ath10k *ar)  {  	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); -	struct ce_state *ce_diag = ar_pci->ce_diag;  	const struct ce_attr *attr; -	struct hif_ce_pipe_info *pipe_info; -	struct ath10k_pci_compl *compl; -	int i, pipe_num, completions, disable_interrupts; +	struct ath10k_pci_pipe *pipe_info; +	int pipe_num, disable_interrupts; -	spin_lock_init(&ar_pci->compl_lock); -	INIT_LIST_HEAD(&ar_pci->compl_process); - -	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { +	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {  		pipe_info = &ar_pci->pipe_info[pipe_num]; -		spin_lock_init(&pipe_info->pipe_lock); -		
INIT_LIST_HEAD(&pipe_info->compl_free); -  		/* Handle Diagnostic CE specially */ -		if (pipe_info->ce_hdl == ce_diag) +		if (pipe_info->ce_hdl == ar_pci->ce_diag)  			continue;  		attr = &host_ce_config_wlan[pipe_num]; -		completions = 0;  		if (attr->src_nentries) {  			disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;  			ath10k_ce_send_cb_register(pipe_info->ce_hdl,  						   ath10k_pci_ce_send_done,  						   disable_interrupts); -			completions += attr->src_nentries; -			pipe_info->num_sends_allowed = attr->src_nentries - 1;  		} -		if (attr->dest_nentries) { +		if (attr->dest_nentries)  			ath10k_ce_recv_cb_register(pipe_info->ce_hdl,  						   ath10k_pci_ce_recv_data); -			completions += attr->dest_nentries; -		} - -		if (completions == 0) -			continue; - -		for (i = 0; i < completions; i++) { -			compl = kmalloc(sizeof(struct ath10k_pci_compl), -					GFP_KERNEL); -			if (!compl) { -				ath10k_warn("No memory for completion state\n"); -				ath10k_pci_stop_ce(ar); -				return -ENOMEM; -			} - -			compl->send_or_recv = HIF_CE_COMPLETE_FREE; -			list_add_tail(&compl->list, &pipe_info->compl_free); -		}  	}  	return 0;  } -static void ath10k_pci_stop_ce(struct ath10k *ar) +static void ath10k_pci_kill_tasklet(struct ath10k *ar)  {  	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); -	struct ath10k_pci_compl *compl; -	struct sk_buff *skb;  	int i; -	ath10k_ce_disable_interrupts(ar); - -	/* Cancel the pending tasklet */  	tasklet_kill(&ar_pci->intr_tq); +	tasklet_kill(&ar_pci->msi_fw_err); +	tasklet_kill(&ar_pci->early_irq_tasklet);  	for (i = 0; i < CE_COUNT; i++)  		tasklet_kill(&ar_pci->pipe_info[i].intr); - -	/* Mark pending completions as aborted, so that upper layers free up -	 * their associated resources */ -	spin_lock_bh(&ar_pci->compl_lock); -	list_for_each_entry(compl, &ar_pci->compl_process, list) { -		skb = (struct sk_buff *)compl->transfer_context; -		ATH10K_SKB_CB(skb)->is_aborted = true; -	} -	spin_unlock_bh(&ar_pci->compl_lock); -} - -static void ath10k_pci_cleanup_ce(struct ath10k *ar) -{ -	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); -	struct ath10k_pci_compl *compl, *tmp; -	struct hif_ce_pipe_info *pipe_info; -	struct sk_buff *netbuf; -	int pipe_num; - -	/* Free pending completions. */ -	spin_lock_bh(&ar_pci->compl_lock); -	if (!list_empty(&ar_pci->compl_process)) -		ath10k_warn("pending completions still present! possible memory leaks.\n"); - -	list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) { -		list_del(&compl->list); -		netbuf = (struct sk_buff *)compl->transfer_context; -		dev_kfree_skb_any(netbuf); -		kfree(compl); -	} -	spin_unlock_bh(&ar_pci->compl_lock); - -	/* Free unused completions for each pipe. */ -	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { -		pipe_info = &ar_pci->pipe_info[pipe_num]; - -		spin_lock_bh(&pipe_info->pipe_lock); -		list_for_each_entry_safe(compl, tmp, -					 &pipe_info->compl_free, list) { -			list_del(&compl->list); -			kfree(compl); -		} -		spin_unlock_bh(&pipe_info->pipe_lock); -	} -} - -static void ath10k_pci_process_ce(struct ath10k *ar) -{ -	struct ath10k_pci *ar_pci = ar->hif.priv; -	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current; -	struct ath10k_pci_compl *compl; -	struct sk_buff *skb; -	unsigned int nbytes; -	int ret, send_done = 0; - -	/* Upper layers aren't ready to handle tx/rx completions in parallel so -	 * we must serialize all completion processing. 
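
[Editorial aside] For reference on the scatter-gather path added earlier (ath10k_pci_hif_tx_sg), the up-front room check relies on power-of-two ring index arithmetic. Assuming the usual CE_RING_DELTA definition of ((to - from) & mask), the free-slot computation reduces to the following, with one slot deliberately left unused so a full ring is distinguishable from an empty one:

#include <stdio.h>

/* Assumed CE_RING_DELTA semantics on a power-of-two ring:
 * number of steps from `from` to `to`, modulo ring size. */
static unsigned int ring_delta(unsigned int nentries_mask,
			       unsigned int from, unsigned int to)
{
	return (to - from) & nentries_mask;
}

int main(void)
{
	unsigned int mask = 15;			/* 16-entry ring */
	unsigned int sw_index = 3;		/* consumer position */
	unsigned int write_index = 14;		/* producer position */
	/* Free slots before the producer would catch the consumer;
	 * `sw_index - 1` keeps one slot empty (full != empty). */
	unsigned int free = ring_delta(mask, write_index, sw_index - 1);

	printf("free slots: %u\n", free);	/* prints 4 */
	return 0;
}
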
*/ - -	spin_lock_bh(&ar_pci->compl_lock); -	if (ar_pci->compl_processing) { -		spin_unlock_bh(&ar_pci->compl_lock); -		return; -	} -	ar_pci->compl_processing = true; -	spin_unlock_bh(&ar_pci->compl_lock); - -	for (;;) { -		spin_lock_bh(&ar_pci->compl_lock); -		if (list_empty(&ar_pci->compl_process)) { -			spin_unlock_bh(&ar_pci->compl_lock); -			break; -		} -		compl = list_first_entry(&ar_pci->compl_process, -					 struct ath10k_pci_compl, list); -		list_del(&compl->list); -		spin_unlock_bh(&ar_pci->compl_lock); - -		if (compl->send_or_recv == HIF_CE_COMPLETE_SEND) { -			cb->tx_completion(ar, -					  compl->transfer_context, -					  compl->transfer_id); -			send_done = 1; -		} else { -			ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1); -			if (ret) { -				ath10k_warn("Unable to post recv buffer for pipe: %d\n", -					    compl->pipe_info->pipe_num); -				break; -			} - -			skb = (struct sk_buff *)compl->transfer_context; -			nbytes = compl->nbytes; - -			ath10k_dbg(ATH10K_DBG_PCI, -				   "ath10k_pci_ce_recv_data netbuf=%p  nbytes=%d\n", -				   skb, nbytes); -			ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, -					"ath10k rx: ", skb->data, nbytes); - -			if (skb->len + skb_tailroom(skb) >= nbytes) { -				skb_trim(skb, 0); -				skb_put(skb, nbytes); -				cb->rx_completion(ar, skb, -						  compl->pipe_info->pipe_num); -			} else { -				ath10k_warn("rxed more than expected (nbytes %d, max %d)", -					    nbytes, -					    skb->len + skb_tailroom(skb)); -			} -		} - -		compl->send_or_recv = HIF_CE_COMPLETE_FREE; - -		/* -		 * Add completion back to the pipe's free list. -		 */ -		spin_lock_bh(&compl->pipe_info->pipe_lock); -		list_add_tail(&compl->list, &compl->pipe_info->compl_free); -		compl->pipe_info->num_sends_allowed += send_done; -		spin_unlock_bh(&compl->pipe_info->pipe_lock); -	} - -	spin_lock_bh(&ar_pci->compl_lock); -	ar_pci->compl_processing = false; -	spin_unlock_bh(&ar_pci->compl_lock);  }  /* TODO - temporary mapping while we have too few CE's */ @@ -970,6 +967,8 @@ static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,  {  	int ret = 0; +	ath10k_dbg(ATH10K_DBG_PCI, "pci hif map service\n"); +  	/* polling for received messages not supported */  	*dl_is_polled = 0; @@ -1029,6 +1028,8 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,  {  	int ul_is_polled, dl_is_polled; +	ath10k_dbg(ATH10K_DBG_PCI, "pci hif get default pipe\n"); +  	(void)ath10k_pci_hif_map_service_to_pipe(ar,  						 ATH10K_HTC_SVC_ID_RSVD_CTRL,  						 ul_pipe, @@ -1037,12 +1038,12 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,  						 &dl_is_polled);  } -static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info, +static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,  				   int num)  {  	struct ath10k *ar = pipe_info->hif_ce_state;  	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); -	struct ce_state *ce_state = pipe_info->ce_hdl; +	struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;  	struct sk_buff *skb;  	dma_addr_t ce_data;  	int i, ret = 0; @@ -1053,7 +1054,7 @@ static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,  	for (i = 0; i < num; i++) {  		skb = dev_alloc_skb(pipe_info->buf_sz);  		if (!skb) { -			ath10k_warn("could not allocate skbuff for pipe %d\n", +			ath10k_warn("failed to allocate skbuff for pipe %d\n",  				    num);  			ret = -ENOMEM;  			goto err; @@ -1066,7 +1067,7 @@ static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,  					 DMA_FROM_DEVICE);  		if 
(unlikely(dma_mapping_error(ar->dev, ce_data))) {
-			ath10k_warn("could not dma map skbuff\n");
+			ath10k_warn("failed to DMA map sk_buff\n");
 			dev_kfree_skb_any(skb);
 			ret = -EIO;
 			goto err;
@@ -1081,7 +1082,7 @@ static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
 		ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
 						 ce_data);
 		if (ret) {
-			ath10k_warn("could not enqueue to pipe %d (%d)\n",
+			ath10k_warn("failed to enqueue to pipe %d: %d\n",
 				    num, ret);
 			goto err;
 		}
@@ -1097,11 +1098,11 @@ err:
 
 static int ath10k_pci_post_rx(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct hif_ce_pipe_info *pipe_info;
+	struct ath10k_pci_pipe *pipe_info;
 	const struct ce_attr *attr;
 	int pipe_num, ret = 0;
 
-	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
 		pipe_info = &ar_pci->pipe_info[pipe_num];
 		attr = &host_ce_config_wlan[pipe_num];
@@ -1111,8 +1112,8 @@ static int ath10k_pci_post_rx(struct ath10k *ar)
 		ret = ath10k_pci_post_rx_pipe(pipe_info,
 					      attr->dest_nentries - 1);
 		if (ret) {
-			ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
-				    pipe_num);
+			ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
+				    pipe_num, ret);
 
 			for (; pipe_num >= 0; pipe_num--) {
 				pipe_info = &ar_pci->pipe_info[pipe_num];
@@ -1128,30 +1129,57 @@ static int ath10k_pci_post_rx(struct ath10k *ar)
 static int ath10k_pci_hif_start(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	int ret;
+	int ret, ret_early;
 
-	ret = ath10k_pci_start_ce(ar);
+	ath10k_dbg(ATH10K_DBG_BOOT, "boot hif start\n");
+
+	ath10k_pci_free_early_irq(ar);
+	ath10k_pci_kill_tasklet(ar);
+
+	ret = ath10k_pci_request_irq(ar);
 	if (ret) {
-		ath10k_warn("could not start CE (%d)\n", ret);
-		return ret;
+		ath10k_warn("failed to request irqs: %d\n",
+			    ret);
+		goto err_early_irq;
+	}
+
+	ret = ath10k_pci_setup_ce_irq(ar);
+	if (ret) {
+		ath10k_warn("failed to setup CE interrupts: %d\n", ret);
+		goto err_stop;
 	}
 
 	/* Post buffers once to start things off. */
 	ret = ath10k_pci_post_rx(ar);
 	if (ret) {
-		ath10k_warn("could not post rx pipes (%d)\n", ret);
-		return ret;
+		ath10k_warn("failed to post RX buffers for all pipes: %d\n",
+			    ret);
+		goto err_stop;
 	}
 
 	ar_pci->started = 1;
 	return 0;
+
+err_stop:
+	ath10k_ce_disable_interrupts(ar);
+	ath10k_pci_free_irq(ar);
+	ath10k_pci_kill_tasklet(ar);
+err_early_irq:
+	/* Though there should be no interrupts (device was reset)
+	 * power_down() expects the early IRQ to be installed as per the
+	 * driver lifecycle. 
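
[Editorial aside] ath10k_pci_hif_start() now follows the standard kernel goto-unwind idiom, with the twist that its error path must also re-install the early IRQ it tore down at entry. A stub model of that control flow (every function below is an invented stand-in):

#include <stdio.h>

static int request_irq_stub(void)   { puts("request irq");       return 0; }
static void free_irq_stub(void)     { puts("free irq"); }
static int setup_ce_stub(void)      { puts("setup ce irq");      return 0; }
static void disable_ce_stub(void)   { puts("disable ce irq"); }
static int post_rx_stub(void)       { puts("post rx");           return -1; }
static void request_early_stub(void){ puts("re-arm early irq"); }

static int hif_start(void)
{
	int ret;

	ret = request_irq_stub();
	if (ret)
		goto err_early_irq;

	ret = setup_ce_stub();
	if (ret)
		goto err_stop;

	ret = post_rx_stub();	/* deliberately fails in this demo */
	if (ret)
		goto err_stop;

	return 0;

err_stop:
	/* unwind in reverse order of acquisition */
	disable_ce_stub();
	free_irq_stub();
err_early_irq:
	/* mirror the driver: restore the early IRQ the caller expects */
	request_early_stub();
	return ret;
}

int main(void)
{
	printf("hif_start -> %d\n", hif_start());
	return 0;
}
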
*/ +	ret_early = ath10k_pci_request_early_irq(ar); +	if (ret_early) +		ath10k_warn("failed to re-enable early irq: %d\n", ret_early); + +	return ret;  } -static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info) +static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)  {  	struct ath10k *ar;  	struct ath10k_pci *ar_pci; -	struct ce_state *ce_hdl; +	struct ath10k_ce_pipe *ce_hdl;  	u32 buf_sz;  	struct sk_buff *netbuf;  	u32 ce_data; @@ -1179,11 +1207,11 @@ static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)  	}  } -static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info) +static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)  {  	struct ath10k *ar;  	struct ath10k_pci *ar_pci; -	struct ce_state *ce_hdl; +	struct ath10k_ce_pipe *ce_hdl;  	struct sk_buff *netbuf;  	u32 ce_data;  	unsigned int nbytes; @@ -1206,15 +1234,13 @@ static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)  	while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,  					  &ce_data, &nbytes, &id) == 0) { -		if (netbuf != CE_SENDLIST_ITEM_CTXT) -			/* -			 * Indicate the completion to higer layer to free -			 * the buffer -			 */ -			ATH10K_SKB_CB(netbuf)->is_aborted = true; -			ar_pci->msg_callbacks_current.tx_completion(ar, -								    netbuf, -								    id); +		/* no need to call tx completion for NULL pointers */ +		if (!netbuf) +			continue; + +		ar_pci->msg_callbacks_current.tx_completion(ar, +							    netbuf, +							    id);  	}  } @@ -1231,8 +1257,8 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)  	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);  	int pipe_num; -	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { -		struct hif_ce_pipe_info *pipe_info; +	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { +		struct ath10k_pci_pipe *pipe_info;  		pipe_info = &ar_pci->pipe_info[pipe_num];  		ath10k_pci_rx_pipe_cleanup(pipe_info); @@ -1242,49 +1268,46 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)  static void ath10k_pci_ce_deinit(struct ath10k *ar)  { -	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); -	struct hif_ce_pipe_info *pipe_info; -	int pipe_num; - -	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { -		pipe_info = &ar_pci->pipe_info[pipe_num]; -		if (pipe_info->ce_hdl) { -			ath10k_ce_deinit(pipe_info->ce_hdl); -			pipe_info->ce_hdl = NULL; -			pipe_info->buf_sz = 0; -		} -	} -} - -static void ath10k_pci_disable_irqs(struct ath10k *ar) -{ -	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);  	int i; -	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++) -		disable_irq(ar_pci->pdev->irq + i); +	for (i = 0; i < CE_COUNT; i++) +		ath10k_ce_deinit_pipe(ar, i);  }  static void ath10k_pci_hif_stop(struct ath10k *ar)  {  	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); +	int ret; -	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); +	ath10k_dbg(ATH10K_DBG_BOOT, "boot hif stop\n"); -	/* Irqs are never explicitly re-enabled. They are implicitly re-enabled -	 * by ath10k_pci_start_intr(). 
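
[Editorial aside] Worth noting in the ath10k_pci_tx_pipe_cleanup() hunk just above: the deleted code was a textbook missing-braces bug. Despite the indentation, only the is_aborted assignment was guarded by the if; tx_completion() ran even for CE_SENDLIST_ITEM_CTXT entries. The rewrite sidesteps the construct entirely (gcc's -Wmisleading-indentation exists for exactly this). A two-line demonstration of the hazard:

#include <stdio.h>

int main(void)
{
	const char *netbuf = NULL;	/* stands in for the sentinel check */

	/* Shape of the deleted code: the indentation suggests both calls
	 * are guarded, but without braces only the first one is. */
	if (netbuf != NULL)
		printf("mark aborted\n");
		printf("tx_completion() runs unconditionally\n");	/* bug */

	/* Shape of the replacement: skip early, then act. */
	if (!netbuf) {
		printf("skip NULL context\n");
		return 0;
	}
	printf("mark + complete\n");
	return 0;
}
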
*/
-	ath10k_pci_disable_irqs(ar);
+	if (WARN_ON(!ar_pci->started))
+		return;
-	ath10k_pci_stop_ce(ar);
+	ret = ath10k_ce_disable_interrupts(ar);
+	if (ret)
+		ath10k_warn("failed to disable CE interrupts: %d\n", ret);
+
+	ath10k_pci_free_irq(ar);
+	ath10k_pci_kill_tasklet(ar);
+
+	ret = ath10k_pci_request_early_irq(ar);
+	if (ret)
+		ath10k_warn("failed to re-enable early irq: %d\n", ret);
 
 	/* At this point, asynchronous threads are stopped, the target should
 	 * not DMA nor interrupt. We process the leftovers and then free
 	 * everything else up. */
-	ath10k_pci_process_ce(ar);
-	ath10k_pci_cleanup_ce(ar);
 	ath10k_pci_buffer_cleanup(ar);
 
+	/* Make sure the device won't access any structures on the host by
+	 * resetting it. The device was fed with PCI CE ringbuffer
+	 * configuration during init. If ringbuffers are freed and the device
+	 * were to access them this could lead to memory corruption on the
+	 * host. */
+	ath10k_pci_warm_reset(ar);
+
 	ar_pci->started = 0;
 }
@@ -1293,14 +1316,18 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
 					   void *resp, u32 *resp_len)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl;
-	struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl;
+	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
+	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
+	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
+	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
 	dma_addr_t req_paddr = 0;
 	dma_addr_t resp_paddr = 0;
 	struct bmi_xfer xfer = {};
 	void *treq, *tresp = NULL;
 	int ret = 0;
 
+	might_sleep();
+
 	if (resp && !resp_len)
 		return -EINVAL;
@@ -1341,14 +1368,12 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
 	if (ret)
 		goto err_resp;
 
-	ret = wait_for_completion_timeout(&xfer.done,
-					  BMI_COMMUNICATION_TIMEOUT_HZ);
-	if (ret <= 0) {
+	ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
+	if (ret) {
 		u32 unused_buffer;
 		unsigned int unused_nbytes;
 		unsigned int unused_id;
 
-		ret = -ETIMEDOUT;
 		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
 					   &unused_nbytes, &unused_id);
 	} else {
@@ -1378,13 +1403,16 @@ err_dma:
 	return ret;
 }
 
-static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
-				     void *transfer_context,
-				     u32 data,
-				     unsigned int nbytes,
-				     unsigned int transfer_id)
+static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
 {
-	struct bmi_xfer *xfer = transfer_context;
+	struct bmi_xfer *xfer;
+	u32 ce_data;
+	unsigned int nbytes;
+	unsigned int transfer_id;
+
+	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
+					  &nbytes, &transfer_id))
+		return;
 
 	if (xfer->wait_for_resp)
 		return;
@@ -1392,14 +1420,17 @@ static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
 	complete(&xfer->done);
 }
 
-static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state,
-				     void *transfer_context,
-				     u32 data,
-				     unsigned int nbytes,
-				     unsigned int transfer_id,
-				     unsigned int flags)
+static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
 {
-	struct bmi_xfer *xfer = transfer_context;
+	struct bmi_xfer *xfer;
+	u32 ce_data;
+	unsigned int nbytes;
+	unsigned int transfer_id;
+	unsigned int flags;
+
+	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
+					  &nbytes, &transfer_id, &flags))
+		return;
 
 	if
(!xfer->wait_for_resp) {  		ath10k_warn("unexpected: BMI data received; ignoring\n"); @@ -1410,6 +1441,25 @@ static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state,  	complete(&xfer->done);  } +static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe, +			       struct ath10k_ce_pipe *rx_pipe, +			       struct bmi_xfer *xfer) +{ +	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ; + +	while (time_before_eq(jiffies, timeout)) { +		ath10k_pci_bmi_send_done(tx_pipe); +		ath10k_pci_bmi_recv_data(rx_pipe); + +		if (completion_done(&xfer->done)) +			return 0; + +		schedule(); +	} + +	return -ETIMEDOUT; +} +  /*   * Map from service/endpoint to Copy Engine.   * This table is derived from the CE_PCI TABLE, above. @@ -1519,7 +1569,7 @@ static int ath10k_pci_wake_target_cpu(struct ath10k *ar)  					      CORE_CTRL_ADDRESS,  					  &core_ctrl);  	if (ret) { -		ath10k_warn("Unable to read core ctrl\n"); +		ath10k_warn("failed to read core_ctrl: %d\n", ret);  		return ret;  	} @@ -1529,10 +1579,13 @@ static int ath10k_pci_wake_target_cpu(struct ath10k *ar)  	ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |  					       CORE_CTRL_ADDRESS,  					   core_ctrl); -	if (ret) -		ath10k_warn("Unable to set interrupt mask\n"); +	if (ret) { +		ath10k_warn("failed to set target CPU interrupt mask: %d\n", +			    ret); +		return ret; +	} -	return ret; +	return 0;  }  static int ath10k_pci_init_config(struct ath10k *ar) @@ -1674,73 +1727,78 @@ static int ath10k_pci_init_config(struct ath10k *ar)  	return 0;  } +static int ath10k_pci_alloc_ce(struct ath10k *ar) +{ +	int i, ret; +	for (i = 0; i < CE_COUNT; i++) { +		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]); +		if (ret) { +			ath10k_err("failed to allocate copy engine pipe %d: %d\n", +				   i, ret); +			return ret; +		} +	} + +	return 0; +} + +static void ath10k_pci_free_ce(struct ath10k *ar) +{ +	int i; + +	for (i = 0; i < CE_COUNT; i++) +		ath10k_ce_free_pipe(ar, i); +}  static int ath10k_pci_ce_init(struct ath10k *ar)  {  	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); -	struct hif_ce_pipe_info *pipe_info; +	struct ath10k_pci_pipe *pipe_info;  	const struct ce_attr *attr; -	int pipe_num; +	int pipe_num, ret; -	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { +	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {  		pipe_info = &ar_pci->pipe_info[pipe_num]; +		pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num];  		pipe_info->pipe_num = pipe_num;  		pipe_info->hif_ce_state = ar;  		attr = &host_ce_config_wlan[pipe_num]; -		pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr); -		if (pipe_info->ce_hdl == NULL) { -			ath10k_err("Unable to initialize CE for pipe: %d\n", -				   pipe_num); - -			/* It is safe to call it here. It checks if ce_hdl is -			 * valid for each pipe */ -			ath10k_pci_ce_deinit(ar); -			return -1; +		ret = ath10k_ce_init_pipe(ar, pipe_num, attr); +		if (ret) { +			ath10k_err("failed to initialize copy engine pipe %d: %d\n", +				   pipe_num, ret); +			return ret;  		} -		if (pipe_num == ar_pci->ce_count - 1) { +		if (pipe_num == CE_COUNT - 1) {  			/*  			 * Reserve the ultimate CE for  			 * diagnostic Window support  			 */ -			ar_pci->ce_diag = -			ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl; +			ar_pci->ce_diag = pipe_info->ce_hdl;  			continue;  		}  		pipe_info->buf_sz = (size_t) (attr->src_sz_max);  	} -	/* -	 * Initially, establish CE completion handlers for use with BMI. -	 * These are overwritten with generic handlers after we exit BMI phase. 
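
[Editorial aside] ath10k_pci_bmi_wait() replaces the interrupt-driven wait_for_completion_timeout() with explicit polling of both BMI pipes, which is why the send/recv handlers above were rewritten to pull completions themselves and why exchange_bmi_msg() gained might_sleep(). A stand-alone sketch of the poll-until-deadline control flow (the clock source and simulated pipes are illustrative only):

#include <stdio.h>
#include <time.h>

static int steps;
/* Simulated pipe pollers: the RX side "completes" on the third poll. */
static void poll_tx(void) { }
static int poll_rx(void) { return ++steps >= 3; }

static int bmi_wait(double timeout_sec)
{
	clock_t deadline = clock() + (clock_t)(timeout_sec * CLOCKS_PER_SEC);
	int done = 0;

	while (clock() <= deadline) {
		poll_tx();		/* reap send completions */
		done |= poll_rx();	/* reap the response, if any */

		if (done)
			return 0;
		/* schedule() would yield the CPU here in the kernel */
	}

	return -1;		/* -ETIMEDOUT in the driver */
}

int main(void)
{
	printf("bmi_wait -> %d\n", bmi_wait(1.0));
	return 0;
}
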
-	 */ -	pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG]; -	ath10k_ce_send_cb_register(pipe_info->ce_hdl, -				   ath10k_pci_bmi_send_done, 0); - -	pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST]; -	ath10k_ce_recv_cb_register(pipe_info->ce_hdl, -				   ath10k_pci_bmi_recv_data); -  	return 0;  }  static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)  {  	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); -	u32 fw_indicator_address, fw_indicator; +	u32 fw_indicator;  	ath10k_pci_wake(ar); -	fw_indicator_address = ar_pci->fw_indicator_address; -	fw_indicator = ath10k_pci_read32(ar, fw_indicator_address); +	fw_indicator = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);  	if (fw_indicator & FW_IND_EVENT_PENDING) {  		/* ACK: clear Target-side pending event */ -		ath10k_pci_write32(ar, fw_indicator_address, +		ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,  				   fw_indicator & ~FW_IND_EVENT_PENDING);  		if (ar_pci->started) { @@ -1757,17 +1815,120 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)  	ath10k_pci_sleep(ar);  } -static int ath10k_pci_hif_power_up(struct ath10k *ar) +/* this function effectively clears target memory controller assert line */ +static void ath10k_pci_warm_reset_si0(struct ath10k *ar)  { -	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); -	int ret; +	u32 val; + +	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); +	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS, +			       val | SOC_RESET_CONTROL_SI0_RST_MASK); +	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); + +	msleep(10); + +	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); +	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS, +			       val & ~SOC_RESET_CONTROL_SI0_RST_MASK); +	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); + +	msleep(10); +} + +static int ath10k_pci_warm_reset(struct ath10k *ar) +{ +	int ret = 0; +	u32 val; + +	ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset\n"); -	ret = ath10k_pci_start_intr(ar); +	ret = ath10k_do_pci_wake(ar);  	if (ret) { -		ath10k_err("could not start interrupt handling (%d)\n", ret); -		goto err; +		ath10k_err("failed to wake up target: %d\n", ret); +		return ret;  	} +	/* debug */ +	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + +				PCIE_INTR_CAUSE_ADDRESS); +	ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val); + +	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + +				CPU_INTR_ADDRESS); +	ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n", +		   val); + +	/* disable pending irqs */ +	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + +			   PCIE_INTR_ENABLE_ADDRESS, 0); + +	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + +			   PCIE_INTR_CLR_ADDRESS, ~0); + +	msleep(100); + +	/* clear fw indicator */ +	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0); + +	/* clear target LF timer interrupts */ +	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + +				SOC_LF_TIMER_CONTROL0_ADDRESS); +	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + +			   SOC_LF_TIMER_CONTROL0_ADDRESS, +			   val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK); + +	/* reset CE */ +	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + +				SOC_RESET_CONTROL_ADDRESS); +	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, +			   val | SOC_RESET_CONTROL_CE_RST_MASK); +	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + +				SOC_RESET_CONTROL_ADDRESS); +	msleep(10); + +	/* unreset CE */ +	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, +			   val & 
~SOC_RESET_CONTROL_CE_RST_MASK); +	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + +				SOC_RESET_CONTROL_ADDRESS); +	msleep(10); + +	ath10k_pci_warm_reset_si0(ar); + +	/* debug */ +	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + +				PCIE_INTR_CAUSE_ADDRESS); +	ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val); + +	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + +				CPU_INTR_ADDRESS); +	ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n", +		   val); + +	/* CPU warm reset */ +	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + +				SOC_RESET_CONTROL_ADDRESS); +	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, +			   val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK); + +	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + +				SOC_RESET_CONTROL_ADDRESS); +	ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val); + +	msleep(100); + +	ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n"); + +	ath10k_do_pci_sleep(ar); +	return ret; +} + +static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset) +{ +	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); +	const char *irq_mode; +	int ret; +  	/*  	 * Bring the target up cleanly.  	 * @@ -1778,50 +1939,159 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)  	 * is in an unexpected state. We try to catch that here in order to  	 * reset the Target and retry the probe.  	 */ -	ath10k_pci_device_reset(ar); +	if (cold_reset) +		ret = ath10k_pci_cold_reset(ar); +	else +		ret = ath10k_pci_warm_reset(ar); -	ret = ath10k_pci_reset_target(ar); -	if (ret) -		goto err_irq; +	if (ret) { +		ath10k_err("failed to reset target: %d\n", ret); +		goto err; +	}  	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))  		/* Force AWAKE forever */  		ath10k_do_pci_wake(ar);  	ret = ath10k_pci_ce_init(ar); -	if (ret) +	if (ret) { +		ath10k_err("failed to initialize CE: %d\n", ret);  		goto err_ps; +	} -	ret = ath10k_pci_init_config(ar); -	if (ret) +	ret = ath10k_ce_disable_interrupts(ar); +	if (ret) { +		ath10k_err("failed to disable CE interrupts: %d\n", ret);  		goto err_ce; +	} -	ret = ath10k_pci_wake_target_cpu(ar); +	ret = ath10k_pci_init_irq(ar);  	if (ret) { -		ath10k_err("could not wake up target CPU (%d)\n", ret); +		ath10k_err("failed to init irqs: %d\n", ret);  		goto err_ce;  	} +	ret = ath10k_pci_request_early_irq(ar); +	if (ret) { +		ath10k_err("failed to request early irq: %d\n", ret); +		goto err_deinit_irq; +	} + +	ret = ath10k_pci_wait_for_target_init(ar); +	if (ret) { +		ath10k_err("failed to wait for target to init: %d\n", ret); +		goto err_free_early_irq; +	} + +	ret = ath10k_pci_init_config(ar); +	if (ret) { +		ath10k_err("failed to setup init config: %d\n", ret); +		goto err_free_early_irq; +	} + +	ret = ath10k_pci_wake_target_cpu(ar); +	if (ret) { +		ath10k_err("could not wake up target CPU: %d\n", ret); +		goto err_free_early_irq; +	} + +	if (ar_pci->num_msi_intrs > 1) +		irq_mode = "MSI-X"; +	else if (ar_pci->num_msi_intrs == 1) +		irq_mode = "MSI"; +	else +		irq_mode = "legacy"; + +	if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags)) +		ath10k_info("pci irq %s irq_mode %d reset_mode %d\n", +			    irq_mode, ath10k_pci_irq_mode, +			    ath10k_pci_reset_mode); +  	return 0; +err_free_early_irq: +	ath10k_pci_free_early_irq(ar); +err_deinit_irq: +	ath10k_pci_deinit_irq(ar);  err_ce:  	ath10k_pci_ce_deinit(ar); +	ath10k_pci_warm_reset(ar);  err_ps:  	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))  		
ath10k_do_pci_sleep(ar); -err_irq: -	ath10k_pci_stop_intr(ar);  err:  	return ret;  } +static int ath10k_pci_hif_power_up_warm(struct ath10k *ar) +{ +	int i, ret; + +	/* +	 * Sometimes warm reset succeeds after retries. +	 * +	 * FIXME: It might be possible to tune ath10k_pci_warm_reset() to work +	 * at first try. +	 */ +	for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) { +		ret = __ath10k_pci_hif_power_up(ar, false); +		if (ret == 0) +			break; + +		ath10k_warn("failed to warm reset (attempt %d out of %d): %d\n", +			    i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, ret); +	} + +	return ret; +} + +static int ath10k_pci_hif_power_up(struct ath10k *ar) +{ +	int ret; + +	ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power up\n"); + +	/* +	 * Hardware CUS232 version 2 has some issues with cold reset and the +	 * preferred (and safer) way to perform a device reset is through a +	 * warm reset. +	 * +	 * Warm reset doesn't always work though, so falling back to cold reset +	 * may be necessary. +	 */ +	ret = ath10k_pci_hif_power_up_warm(ar); +	if (ret) { +		ath10k_warn("failed to power up target using warm reset: %d\n", +			    ret); + +		if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) +			return ret; + +		ath10k_warn("trying cold reset\n"); + +		ret = __ath10k_pci_hif_power_up(ar, true); +		if (ret) { +			ath10k_err("failed to power up target using cold reset too (%d)\n", +				   ret); +			return ret; +		} +	} + +	return 0; +} +  static void ath10k_pci_hif_power_down(struct ath10k *ar)  {  	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); -	ath10k_pci_stop_intr(ar); +	ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power down\n"); +	ath10k_pci_free_early_irq(ar); +	ath10k_pci_kill_tasklet(ar); +	ath10k_pci_deinit_irq(ar);  	ath10k_pci_ce_deinit(ar); +	ath10k_pci_warm_reset(ar); +  	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))  		ath10k_do_pci_sleep(ar);  } @@ -1876,7 +2146,7 @@ static int ath10k_pci_hif_resume(struct ath10k *ar)  #endif  static const struct ath10k_hif_ops ath10k_pci_hif_ops = { -	.send_head		= ath10k_pci_hif_send_head, +	.tx_sg			= ath10k_pci_hif_tx_sg,  	.exchange_bmi_msg	= ath10k_pci_hif_exchange_bmi_msg,  	.start			= ath10k_pci_hif_start,  	.stop			= ath10k_pci_hif_stop, @@ -1895,7 +2165,7 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = {  static void ath10k_pci_ce_tasklet(unsigned long ptr)  { -	struct hif_ce_pipe_info *pipe = (struct hif_ce_pipe_info *)ptr; +	struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;  	struct ath10k_pci *ar_pci = pipe->ar_pci;  	ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num); @@ -1955,25 +2225,10 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)  	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);  	if (ar_pci->num_msi_intrs == 0) { -		/* -		 * IMPORTANT: INTR_CLR regiser has to be set after -		 * INTR_ENABLE is set to 0, otherwise interrupt can not be -		 * really cleared. -		 */ -		iowrite32(0, ar_pci->mem + -			  (SOC_CORE_BASE_ADDRESS | -			   PCIE_INTR_ENABLE_ADDRESS)); -		iowrite32(PCIE_INTR_FIRMWARE_MASK | -			  PCIE_INTR_CE_MASK_ALL, -			  ar_pci->mem + (SOC_CORE_BASE_ADDRESS | -					 PCIE_INTR_CLR_ADDRESS)); -		/* -		 * IMPORTANT: this extra read transaction is required to -		 * flush the posted write buffer. 
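
For reference, the reset policy introduced above reduces to a retry-then-fallback loop. A minimal sketch (not driver code) that strings the two entry points from this patch together, under an assumed attempts/allow_cold parameterization:

/* Sketch of the policy in ath10k_pci_hif_power_up_warm() and
 * ath10k_pci_hif_power_up(): prefer the safer warm reset, retry it a few
 * times, and only then risk a cold reset (unless pinned to warm-only via
 * ath10k_pci_reset_mode). */
static int example_power_up(struct ath10k *ar, int attempts, bool allow_cold)
{
	int i, ret = -EINVAL;

	for (i = 0; i < attempts; i++) {
		ret = __ath10k_pci_hif_power_up(ar, false); /* warm reset */
		if (ret == 0)
			return 0;
	}

	if (!allow_cold)
		return ret;

	return __ath10k_pci_hif_power_up(ar, true); /* cold reset */
}
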
-		 */ -		(void) ioread32(ar_pci->mem + -				(SOC_CORE_BASE_ADDRESS | -				 PCIE_INTR_ENABLE_ADDRESS)); +		if (!ath10k_pci_irq_pending(ar)) +			return IRQ_NONE; + +		ath10k_pci_disable_and_clear_legacy_irq(ar);  	}  	tasklet_schedule(&ar_pci->intr_tq); @@ -1981,6 +2236,30 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)  	return IRQ_HANDLED;  } +static void ath10k_pci_early_irq_tasklet(unsigned long data) +{ +	struct ath10k *ar = (struct ath10k *)data; +	u32 fw_ind; +	int ret; + +	ret = ath10k_pci_wake(ar); +	if (ret) { +		ath10k_warn("failed to wake target in early irq tasklet: %d\n", +			    ret); +		return; +	} + +	fw_ind = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); +	if (fw_ind & FW_IND_EVENT_PENDING) { +		ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, +				   fw_ind & ~FW_IND_EVENT_PENDING); +		ath10k_pci_hif_dump_area(ar); +	} + +	ath10k_pci_sleep(ar); +	ath10k_pci_enable_legacy_irq(ar); +} +  static void ath10k_pci_tasklet(unsigned long data)  {  	struct ath10k *ar = (struct ath10k *)data; @@ -1989,40 +2268,22 @@ static void ath10k_pci_tasklet(unsigned long data)  	ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */  	ath10k_ce_per_engine_service_any(ar); -	if (ar_pci->num_msi_intrs == 0) { -		/* Enable Legacy PCI line interrupts */ -		iowrite32(PCIE_INTR_FIRMWARE_MASK | -			  PCIE_INTR_CE_MASK_ALL, -			  ar_pci->mem + (SOC_CORE_BASE_ADDRESS | -					 PCIE_INTR_ENABLE_ADDRESS)); -		/* -		 * IMPORTANT: this extra read transaction is required to -		 * flush the posted write buffer -		 */ -		(void) ioread32(ar_pci->mem + -				(SOC_CORE_BASE_ADDRESS | -				 PCIE_INTR_ENABLE_ADDRESS)); -	} +	/* Re-enable legacy irq that was disabled in the irq handler */ +	if (ar_pci->num_msi_intrs == 0) +		ath10k_pci_enable_legacy_irq(ar);  } -static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num) +static int ath10k_pci_request_irq_msix(struct ath10k *ar)  {  	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); -	int ret; -	int i; - -	ret = pci_enable_msi_block(ar_pci->pdev, num); -	if (ret) -		return ret; +	int ret, i;  	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,  			  ath10k_pci_msi_fw_handler,  			  IRQF_SHARED, "ath10k_pci", ar);  	if (ret) { -		ath10k_warn("request_irq(%d) failed %d\n", +		ath10k_warn("failed to request MSI-X fw irq %d: %d\n",  			    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret); - -		pci_disable_msi(ar_pci->pdev);  		return ret;  	} @@ -2031,44 +2292,38 @@ static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)  				  ath10k_pci_per_engine_handler,  				  IRQF_SHARED, "ath10k_pci", ar);  		if (ret) { -			ath10k_warn("request_irq(%d) failed %d\n", +			ath10k_warn("failed to request MSI-X ce irq %d: %d\n",  				    ar_pci->pdev->irq + i, ret);  			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)  				free_irq(ar_pci->pdev->irq + i, ar);  			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar); -			pci_disable_msi(ar_pci->pdev);  			return ret;  		}  	} -	ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);  	return 0;  } -static int ath10k_pci_start_intr_msi(struct ath10k *ar) +static int ath10k_pci_request_irq_msi(struct ath10k *ar)  {  	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);  	int ret; -	ret = pci_enable_msi(ar_pci->pdev); -	if (ret < 0) -		return ret; -  	ret = request_irq(ar_pci->pdev->irq,  			  ath10k_pci_interrupt_handler,  			  IRQF_SHARED, "ath10k_pci", ar); -	if (ret < 0) { -		pci_disable_msi(ar_pci->pdev); +	if (ret) { +		ath10k_warn("failed to request MSI irq %d: %d\n", +			    ar_pci->pdev->irq, ret);  		
return ret;  	} -	ath10k_info("MSI interrupt handling\n");  	return 0;  } -static int ath10k_pci_start_intr_legacy(struct ath10k *ar) +static int ath10k_pci_request_irq_legacy(struct ath10k *ar)  {  	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);  	int ret; @@ -2076,168 +2331,247 @@ static int ath10k_pci_start_intr_legacy(struct ath10k *ar)  	ret = request_irq(ar_pci->pdev->irq,  			  ath10k_pci_interrupt_handler,  			  IRQF_SHARED, "ath10k_pci", ar); -	if (ret < 0) +	if (ret) { +		ath10k_warn("failed to request legacy irq %d: %d\n", +			    ar_pci->pdev->irq, ret);  		return ret; +	} -	/* -	 * Make sure to wake the Target before enabling Legacy -	 * Interrupt. -	 */ -	iowrite32(PCIE_SOC_WAKE_V_MASK, -		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + -		  PCIE_SOC_WAKE_ADDRESS); +	return 0; +} -	ath10k_pci_wait(ar); +static int ath10k_pci_request_irq(struct ath10k *ar) +{ +	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); -	/* -	 * A potential race occurs here: The CORE_BASE write -	 * depends on target correctly decoding AXI address but -	 * host won't know when target writes BAR to CORE_CTRL. -	 * This write might get lost if target has NOT written BAR. -	 * For now, fix the race by repeating the write in below -	 * synchronization checking. -	 */ -	iowrite32(PCIE_INTR_FIRMWARE_MASK | -		  PCIE_INTR_CE_MASK_ALL, -		  ar_pci->mem + (SOC_CORE_BASE_ADDRESS | -				 PCIE_INTR_ENABLE_ADDRESS)); -	iowrite32(PCIE_SOC_WAKE_RESET, -		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + -		  PCIE_SOC_WAKE_ADDRESS); - -	ath10k_info("legacy interrupt handling\n"); -	return 0; +	switch (ar_pci->num_msi_intrs) { +	case 0: +		return ath10k_pci_request_irq_legacy(ar); +	case 1: +		return ath10k_pci_request_irq_msi(ar); +	case MSI_NUM_REQUEST: +		return ath10k_pci_request_irq_msix(ar); +	} + +	ath10k_warn("unknown irq configuration upon request\n"); +	return -EINVAL;  } -static int ath10k_pci_start_intr(struct ath10k *ar) +static void ath10k_pci_free_irq(struct ath10k *ar)  {  	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); -	int num = MSI_NUM_REQUEST; -	int ret;  	int i; -	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar); +	/* There's at least one interrupt regardless of whether it's legacy INTR +	 * or MSI or MSI-X */ +	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++) +		free_irq(ar_pci->pdev->irq + i, ar); +} + +static void ath10k_pci_init_irq_tasklets(struct ath10k *ar) +{ +	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); +	int i; + +	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);  	tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet, -		     (unsigned long) ar); +		     (unsigned long)ar); +	tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet, +		     (unsigned long)ar);  	for (i = 0; i < CE_COUNT; i++) {  		ar_pci->pipe_info[i].ar_pci = ar_pci; -		tasklet_init(&ar_pci->pipe_info[i].intr, -			     ath10k_pci_ce_tasklet, +		tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,  			     (unsigned long)&ar_pci->pipe_info[i]);  	} +} -	if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features)) -		num = 1; +static int ath10k_pci_init_irq(struct ath10k *ar) +{ +	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); +	bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X, +				       ar_pci->features); +	int ret; -	if (num > 1) { -		ret = ath10k_pci_start_intr_msix(ar, num); -		if (ret == 0) -			goto exit; +	ath10k_pci_init_irq_tasklets(ar); + +	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO && +	    !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, 
&ar->dev_flags)) +		ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode); + +	/* Try MSI-X */ +	if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) { +		ar_pci->num_msi_intrs = MSI_NUM_REQUEST; +		ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs, +							 ar_pci->num_msi_intrs); +		if (ret > 0) +			return 0; -		ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret); -		num = 1; +		/* fall-through */  	} -	if (num == 1) { -		ret = ath10k_pci_start_intr_msi(ar); +	/* Try MSI */ +	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) { +		ar_pci->num_msi_intrs = 1; +		ret = pci_enable_msi(ar_pci->pdev);  		if (ret == 0) -			goto exit; +			return 0; -		ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n", -			    ret); -		num = 0; +		/* fall-through */  	} -	ret = ath10k_pci_start_intr_legacy(ar); +	/* Try legacy irq +	 * +	 * A potential race occurs here: The CORE_BASE write +	 * depends on target correctly decoding AXI address but +	 * host won't know when target writes BAR to CORE_CTRL. +	 * This write might get lost if target has NOT written BAR. +	 * For now, fix the race by repeating the write in below +	 * synchronization checking. */ +	ar_pci->num_msi_intrs = 0; -exit: -	ar_pci->num_msi_intrs = num; -	ar_pci->ce_count = CE_COUNT; -	return ret; +	ret = ath10k_pci_wake(ar); +	if (ret) { +		ath10k_warn("failed to wake target: %d\n", ret); +		return ret; +	} + +	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, +			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); +	ath10k_pci_sleep(ar); + +	return 0;  } -static void ath10k_pci_stop_intr(struct ath10k *ar) +static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)  { -	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); -	int i; +	int ret; -	/* There's at least one interrupt irregardless whether its legacy INTR -	 * or MSI or MSI-X */ -	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++) -		free_irq(ar_pci->pdev->irq + i, ar); +	ret = ath10k_pci_wake(ar); +	if (ret) { +		ath10k_warn("failed to wake target: %d\n", ret); +		return ret; +	} + +	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, +			   0); +	ath10k_pci_sleep(ar); -	if (ar_pci->num_msi_intrs > 0) +	return 0; +} + +static int ath10k_pci_deinit_irq(struct ath10k *ar) +{ +	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + +	switch (ar_pci->num_msi_intrs) { +	case 0: +		return ath10k_pci_deinit_irq_legacy(ar); +	case 1: +		/* fall-through */ +	case MSI_NUM_REQUEST: +		pci_disable_msi(ar_pci->pdev); +		return 0; +	default:  		pci_disable_msi(ar_pci->pdev); +	} + +	ath10k_warn("unknown irq configuration upon deinit\n"); +	return -EINVAL;  } -static int ath10k_pci_reset_target(struct ath10k *ar) +static int ath10k_pci_wait_for_target_init(struct ath10k *ar)  {  	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); -	int wait_limit = 300; /* 3 sec */ +	unsigned long timeout; +	int ret; +	u32 val; + +	ath10k_dbg(ATH10K_DBG_BOOT, "boot waiting target to initialise\n"); + +	ret = ath10k_pci_wake(ar); +	if (ret) { +		ath10k_err("failed to wake up target for init: %d\n", ret); +		return ret; +	} + +	timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT); -	/* Wait for Target to finish initialization before we proceed. 
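
A condensed sketch of the vector-negotiation ladder that ath10k_pci_init_irq() now implements; example_negotiate_vectors() is a hypothetical helper (not driver code) returning the number of MSI vectors obtained, with 0 meaning legacy INTx:

#include <linux/pci.h>

/* Ask for the full MSI block first, fall back to a single MSI, then to
 * the shared legacy line; pci_enable_msi_range() returns the number of
 * vectors allocated or a negative errno. */
static int example_negotiate_vectors(struct pci_dev *pdev, int want)
{
	int ret;

	ret = pci_enable_msi_range(pdev, want, want); /* all-or-nothing */
	if (ret > 0)
		return ret;		/* got 'want' vectors */

	if (pci_enable_msi(pdev) == 0)
		return 1;		/* single MSI */

	return 0;			/* 0 == legacy INTx */
}
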
*/ -	iowrite32(PCIE_SOC_WAKE_V_MASK, -		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + -		  PCIE_SOC_WAKE_ADDRESS); +	do { +		val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); -	ath10k_pci_wait(ar); +		ath10k_dbg(ATH10K_DBG_BOOT, "boot target indicator %x\n", val); + +		/* target should never return this */ +		if (val == 0xffffffff) +			continue; + +		/* the device has crashed so don't bother trying anymore */ +		if (val & FW_IND_EVENT_PENDING) +			break; + +		if (val & FW_IND_INITIALIZED) +			break; -	while (wait_limit-- && -	       !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) & -		 FW_IND_INITIALIZED)) {  		if (ar_pci->num_msi_intrs == 0)  			/* Fix potential race by repeating CORE_BASE writes */ -			iowrite32(PCIE_INTR_FIRMWARE_MASK | -				  PCIE_INTR_CE_MASK_ALL, -				  ar_pci->mem + (SOC_CORE_BASE_ADDRESS | -						 PCIE_INTR_ENABLE_ADDRESS)); +			ath10k_pci_soc_write32(ar, PCIE_INTR_ENABLE_ADDRESS, +					       PCIE_INTR_FIRMWARE_MASK | +					       PCIE_INTR_CE_MASK_ALL); +  		mdelay(10); +	} while (time_before(jiffies, timeout)); + +	if (val == 0xffffffff) { +		ath10k_err("failed to read device register, device is gone\n"); +		ret = -EIO; +		goto out;  	} -	if (wait_limit < 0) { -		ath10k_err("Target stalled\n"); -		iowrite32(PCIE_SOC_WAKE_RESET, -			  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + -			  PCIE_SOC_WAKE_ADDRESS); -		return -EIO; +	if (val & FW_IND_EVENT_PENDING) { +		ath10k_warn("device has crashed during init\n"); +		ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, +				   val & ~FW_IND_EVENT_PENDING); +		ath10k_pci_hif_dump_area(ar); +		ret = -ECOMM; +		goto out;  	} -	iowrite32(PCIE_SOC_WAKE_RESET, -		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + -		  PCIE_SOC_WAKE_ADDRESS); +	if (!(val & FW_IND_INITIALIZED)) { +		ath10k_err("failed to receive initialized event from target: %08x\n", +			   val); +		ret = -ETIMEDOUT; +		goto out; +	} -	return 0; +	ath10k_dbg(ATH10K_DBG_BOOT, "boot target initialised\n"); + +out: +	ath10k_pci_sleep(ar); +	return ret;  } -static void ath10k_pci_device_reset(struct ath10k *ar) +static int ath10k_pci_cold_reset(struct ath10k *ar)  { -	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); -	void __iomem *mem = ar_pci->mem; -	int i; +	int i, ret;  	u32 val; -	if (!SOC_GLOBAL_RESET_ADDRESS) -		return; +	ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset\n"); -	if (!mem) -		return; - -	ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, -			       PCIE_SOC_WAKE_V_MASK); -	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { -		if (ath10k_pci_target_is_awake(ar)) -			break; -		msleep(1); +	ret = ath10k_do_pci_wake(ar); +	if (ret) { +		ath10k_err("failed to wake up target: %d\n", +			   ret); +		return ret;  	}  	/* Put Target, including PCIe, into RESET. */ -	val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS); +	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);  	val |= 1; -	ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val); +	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);  	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { -		if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) & +		if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &  					  RTC_STATE_COLD_RESET_MASK)  			break;  		msleep(1); @@ -2245,16 +2579,20 @@ static void ath10k_pci_device_reset(struct ath10k *ar)  	/* Pull Target, including PCIe, out of RESET. 
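
The wait loop above distinguishes three outcomes from a single register: an all-ones read (PCI reads of a vanished device return 0xffffffff), a crash flag, and the initialized flag. A reduced sketch, with a hypothetical read_indicator() accessor and EX_IND_* stand-ins for the FW_IND_* bits:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

#define EX_IND_EVENT_PENDING	0x1	/* stand-in for FW_IND_EVENT_PENDING */
#define EX_IND_INITIALIZED	0x2	/* stand-in for FW_IND_INITIALIZED */

static int example_wait_for_init(u32 (*read_indicator)(void),
				 unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
	u32 val;

	do {
		val = read_indicator();

		if (val == 0xffffffff)
			continue;	/* all-ones: device fell off the bus */
		if (val & EX_IND_EVENT_PENDING)
			return -ECOMM;	/* firmware crashed during init */
		if (val & EX_IND_INITIALIZED)
			return 0;

		mdelay(10);
	} while (time_before(jiffies, timeout));

	return (val == 0xffffffff) ? -EIO : -ETIMEDOUT;
}
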
*/  	val &= ~1; -	ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val); +	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);  	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { -		if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) & +		if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &  					    RTC_STATE_COLD_RESET_MASK))  			break;  		msleep(1);  	} -	ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET); +	ath10k_do_pci_sleep(ar); + +	ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset complete\n"); + +	return 0;  }  static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci) @@ -2267,13 +2605,10 @@ static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)  		switch (i) {  		case ATH10K_PCI_FEATURE_MSI_X: -			ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n"); -			break; -		case ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND: -			ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n"); +			ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");  			break;  		case ATH10K_PCI_FEATURE_SOC_POWER_SAVE: -			ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n"); +			ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");  			break;  		}  	} @@ -2286,9 +2621,9 @@ static int ath10k_pci_probe(struct pci_dev *pdev,  	int ret = 0;  	struct ath10k *ar;  	struct ath10k_pci *ar_pci; -	u32 lcr_val; +	u32 lcr_val, chip_id; -	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); +	ath10k_dbg(ATH10K_DBG_PCI, "pci probe\n");  	ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);  	if (ar_pci == NULL) @@ -2298,62 +2633,42 @@ static int ath10k_pci_probe(struct pci_dev *pdev,  	ar_pci->dev = &pdev->dev;  	switch (pci_dev->device) { -	case QCA988X_1_0_DEVICE_ID: -		set_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features); -		break;  	case QCA988X_2_0_DEVICE_ID:  		set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);  		break;  	default:  		ret = -ENODEV; -		ath10k_err("Unkown device ID: %d\n", pci_dev->device); +		ath10k_err("Unknown device ID: %d\n", pci_dev->device);  		goto err_ar_pci;  	} -	if (ath10k_target_ps) +	if (ath10k_pci_target_ps)  		set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);  	ath10k_pci_dump_features(ar_pci);  	ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);  	if (!ar) { -		ath10k_err("ath10k_core_create failed!\n"); +		ath10k_err("failed to create driver core\n");  		ret = -EINVAL;  		goto err_ar_pci;  	} -	/* Enable QCA988X_1.0 HW workarounds */ -	if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) -		spin_lock_init(&ar_pci->hw_v1_workaround_lock); -  	ar_pci->ar = ar; -	ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;  	atomic_set(&ar_pci->keep_awake_count, 0);  	pci_set_drvdata(pdev, ar); -	/* -	 * Without any knowledge of the Host, the Target may have been reset or -	 * power cycled and its Config Space may no longer reflect the PCI -	 * address space that was assigned earlier by the PCI infrastructure. -	 * Refresh it now. 
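
Cold reset above is a classic set/poll/clear/poll pulse on one register bit. Generalized as a hypothetical helper (reg_read/reg_write are assumed accessors in the mold of ath10k_pci_reg_read32/ath10k_pci_reg_write32, bounded by the same millisecond budget as ATH_PCI_RESET_WAIT_MAX):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Pulse bit 0 of a reset register: assert, wait for the status side to
 * confirm, deassert, wait for it to clear again. */
static int example_reset_pulse(u32 (*reg_read)(u32),
			       void (*reg_write)(u32, u32),
			       u32 reset_reg, u32 status_reg, u32 status_mask,
			       int wait_max_ms)
{
	int i;

	reg_write(reset_reg, reg_read(reset_reg) | 1);
	for (i = 0; i < wait_max_ms; i++) {
		if (reg_read(status_reg) & status_mask)
			break;
		msleep(1);
	}

	reg_write(reset_reg, reg_read(reset_reg) & ~1);
	for (i = 0; i < wait_max_ms; i++) {
		if (!(reg_read(status_reg) & status_mask))
			return 0;
		msleep(1);
	}

	return -ETIMEDOUT;
}
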
-	 */ -	ret = pci_assign_resource(pdev, BAR_NUM); -	if (ret) { -		ath10k_err("cannot assign PCI space: %d\n", ret); -		goto err_ar; -	} -  	ret = pci_enable_device(pdev);  	if (ret) { -		ath10k_err("cannot enable PCI device: %d\n", ret); +		ath10k_err("failed to enable PCI device: %d\n", ret);  		goto err_ar;  	}  	/* Request MMIO resources */  	ret = pci_request_region(pdev, BAR_NUM, "ath");  	if (ret) { -		ath10k_err("PCI MMIO reservation error: %d\n", ret); +		ath10k_err("failed to request MMIO region: %d\n", ret);  		goto err_device;  	} @@ -2363,13 +2678,13 @@ static int ath10k_pci_probe(struct pci_dev *pdev,  	 */  	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));  	if (ret) { -		ath10k_err("32-bit DMA not available: %d\n", ret); +		ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);  		goto err_region;  	}  	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));  	if (ret) { -		ath10k_err("cannot enable 32-bit consistent DMA\n"); +		ath10k_err("failed to set consistent DMA mask to 32-bit\n");  		goto err_region;  	} @@ -2386,7 +2701,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,  	/* Arrange for access to Target SoC registers. */  	mem = pci_iomap(pdev, BAR_NUM, 0);  	if (!mem) { -		ath10k_err("PCI iomap error\n"); +		ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);  		ret = -EIO;  		goto err_master;  	} @@ -2395,16 +2710,34 @@ static int ath10k_pci_probe(struct pci_dev *pdev,  	spin_lock_init(&ar_pci->ce_lock); -	ar_pci->cacheline_sz = dma_get_cache_alignment(); +	ret = ath10k_do_pci_wake(ar); +	if (ret) { +		ath10k_err("Failed to get chip id: %d\n", ret); +		goto err_iomap; +	} -	ret = ath10k_core_register(ar); +	chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS); + +	ath10k_do_pci_sleep(ar); + +	ret = ath10k_pci_alloc_ce(ar);  	if (ret) { -		ath10k_err("could not register driver core (%d)\n", ret); +		ath10k_err("failed to allocate copy engine pipes: %d\n", ret);  		goto err_iomap;  	} +	ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem); + +	ret = ath10k_core_register(ar, chip_id); +	if (ret) { +		ath10k_err("failed to register driver core: %d\n", ret); +		goto err_free_ce; +	} +  	return 0; +err_free_ce: +	ath10k_pci_free_ce(ar);  err_iomap:  	pci_iounmap(pdev, mem);  err_master: @@ -2414,7 +2747,6 @@ err_region:  err_device:  	pci_disable_device(pdev);  err_ar: -	pci_set_drvdata(pdev, NULL);  	ath10k_core_destroy(ar);  err_ar_pci:  	/* call HIF PCI free here */ @@ -2428,7 +2760,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)  	struct ath10k *ar = pci_get_drvdata(pdev);  	struct ath10k_pci *ar_pci; -	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); +	ath10k_dbg(ATH10K_DBG_PCI, "pci remove\n");  	if (!ar)  		return; @@ -2438,11 +2770,9 @@ static void ath10k_pci_remove(struct pci_dev *pdev)  	if (!ar_pci)  		return; -	tasklet_kill(&ar_pci->msi_fw_err); -  	ath10k_core_unregister(ar); +	ath10k_pci_free_ce(ar); -	pci_set_drvdata(pdev, NULL);  	pci_iounmap(pdev, ar_pci->mem);  	pci_release_region(pdev, BAR_NUM);  	pci_clear_master(pdev); @@ -2467,7 +2797,7 @@ static int __init ath10k_pci_init(void)  	ret = pci_register_driver(&ath10k_pci_driver);  	if (ret) -		ath10k_err("pci_register_driver failed [%d]\n", ret); +		ath10k_err("failed to register PCI driver: %d\n", ret);  	return ret;  } @@ -2483,9 +2813,5 @@ module_exit(ath10k_pci_exit);  MODULE_AUTHOR("Qualcomm Atheros");  MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");  MODULE_LICENSE("Dual BSD/GPL"); -MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" 
QCA988X_HW_1_0_FW_FILE); -MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_OTP_FILE); -MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_BOARD_DATA_FILE); -MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE); -MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE); +MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_2_FILE);  MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE); diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index 871bb339d56..dfdebb4157a 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -43,22 +43,6 @@ struct bmi_xfer {  	u32 resp_len;  }; -struct ath10k_pci_compl { -	struct list_head list; -	int send_or_recv; -	struct ce_state *ce_state; -	struct hif_ce_pipe_info *pipe_info; -	void *transfer_context; -	unsigned int nbytes; -	unsigned int transfer_id; -	unsigned int flags; -}; - -/* compl_state.send_or_recv */ -#define HIF_CE_COMPLETE_FREE 0 -#define HIF_CE_COMPLETE_SEND 1 -#define HIF_CE_COMPLETE_RECV 2 -  /*   * PCI-specific Target state   * @@ -152,17 +136,16 @@ struct service_to_pipe {  enum ath10k_pci_features {  	ATH10K_PCI_FEATURE_MSI_X		= 0, -	ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND	= 1, -	ATH10K_PCI_FEATURE_SOC_POWER_SAVE	= 2, +	ATH10K_PCI_FEATURE_SOC_POWER_SAVE	= 1,  	/* keep last */  	ATH10K_PCI_FEATURE_COUNT  };  /* Per-pipe state. */ -struct hif_ce_pipe_info { +struct ath10k_pci_pipe {  	/* Handle of underlying Copy Engine */ -	struct ce_state *ce_hdl; +	struct ath10k_ce_pipe *ce_hdl;  	/* Our pipe number; facilitiates use of pipe_info ptrs. */  	u8 pipe_num; @@ -175,12 +158,6 @@ struct hif_ce_pipe_info {  	/* protects compl_free and num_send_allowed */  	spinlock_t pipe_lock; -	/* List of free CE completion slots */ -	struct list_head compl_free; - -	/* Limit the number of outstanding send requests. 
*/ -	int num_sends_allowed; -  	struct ath10k_pci *ar_pci;  	struct tasklet_struct intr;  }; @@ -190,7 +167,6 @@ struct ath10k_pci {  	struct device *dev;  	struct ath10k *ar;  	void __iomem *mem; -	int cacheline_sz;  	DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT); @@ -202,41 +178,25 @@ struct ath10k_pci {  	struct tasklet_struct intr_tq;  	struct tasklet_struct msi_fw_err; - -	/* Number of Copy Engines supported */ -	unsigned int ce_count; +	struct tasklet_struct early_irq_tasklet;  	int started;  	atomic_t keep_awake_count;  	bool verified_awake; -	/* List of CE completions to be processed */ -	struct list_head compl_process; - -	/* protects compl_processing and compl_process */ -	spinlock_t compl_lock; - -	bool compl_processing; - -	struct hif_ce_pipe_info pipe_info[CE_COUNT_MAX]; +	struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];  	struct ath10k_hif_cb msg_callbacks_current; -	/* Target address used to signal a pending firmware event */ -	u32 fw_indicator_address; -  	/* Copy Engine used for Diagnostic Accesses */ -	struct ce_state *ce_diag; +	struct ath10k_ce_pipe *ce_diag;  	/* FIXME: document what this really protects */  	spinlock_t ce_lock;  	/* Map CE id to ce_state */ -	struct ce_state *ce_id_to_state[CE_COUNT_MAX]; - -	/* makes sure that dummy reads are atomic */ -	spinlock_t hw_v1_workaround_lock; +	struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];  };  static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar) @@ -244,14 +204,18 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)  	return ar->hif.priv;  } -static inline u32 ath10k_pci_reg_read32(void __iomem *mem, u32 addr) +static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)  { -	return ioread32(mem + PCIE_LOCAL_BASE_ADDRESS + addr); +	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + +	return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);  } -static inline void ath10k_pci_reg_write32(void __iomem *mem, u32 addr, u32 val) +static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)  { -	iowrite32(val, mem + PCIE_LOCAL_BASE_ADDRESS + addr); +	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + +	iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);  }  #define ATH_PCI_RESET_WAIT_MAX 10 /* ms */ @@ -310,23 +274,8 @@ static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,  				      u32 value)  {  	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); -	void __iomem *addr = ar_pci->mem; - -	if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) { -		unsigned long irq_flags; -		spin_lock_irqsave(&ar_pci->hw_v1_workaround_lock, irq_flags); - -		ioread32(addr+offset+4); /* 3rd read prior to write */ -		ioread32(addr+offset+4); /* 2nd read prior to write */ -		ioread32(addr+offset+4); /* 1st read prior to write */ -		iowrite32(value, addr+offset); - -		spin_unlock_irqrestore(&ar_pci->hw_v1_workaround_lock, -				       irq_flags); -	} else { -		iowrite32(value, addr+offset); -	} +	iowrite32(value, ar_pci->mem + offset);  }  static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) @@ -336,15 +285,27 @@ static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)  	return ioread32(ar_pci->mem + offset);  } -void ath10k_do_pci_wake(struct ath10k *ar); +static inline u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr) +{ +	return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr); +} + +static inline void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val) +{ +	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS 
+ addr, val); +} + +int ath10k_do_pci_wake(struct ath10k *ar);  void ath10k_do_pci_sleep(struct ath10k *ar); -static inline void ath10k_pci_wake(struct ath10k *ar) +static inline int ath10k_pci_wake(struct ath10k *ar)  {  	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);  	if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) -		ath10k_do_pci_wake(ar); +		return ath10k_do_pci_wake(ar); + +	return 0;  }  static inline void ath10k_pci_sleep(struct ath10k *ar) diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h index bfec6c8f2ec..1c584c4b019 100644 --- a/drivers/net/wireless/ath/ath10k/rx_desc.h +++ b/drivers/net/wireless/ath/ath10k/rx_desc.h @@ -422,10 +422,30 @@ struct rx_mpdu_end {  #define RX_MSDU_START_INFO1_IP_FRAG             (1 << 14)  #define RX_MSDU_START_INFO1_TCP_ONLY_ACK        (1 << 15) +/* The decapped header (rx_hdr_status) contains the following: + *  a) 802.11 header + *  [padding to 4 bytes] + *  b) HW crypto parameter + *     - 0 bytes for no security + *     - 4 bytes for WEP + *     - 8 bytes for TKIP, AES + *  [padding to 4 bytes] + *  c) A-MSDU subframe header (14 bytes) if applicable + *  d) LLC/SNAP (RFC1042, 8 bytes) + * + * In case of A-MSDU only the first frame in the sequence contains (a) and (b). */  enum rx_msdu_decap_format { -	RX_MSDU_DECAP_RAW           = 0, -	RX_MSDU_DECAP_NATIVE_WIFI   = 1, +	RX_MSDU_DECAP_RAW = 0, + +	/* Note: QoS frames are reported as non-QoS. The rx_hdr_status in +	 * htt_rx_desc contains the original decapped 802.11 header. */ +	RX_MSDU_DECAP_NATIVE_WIFI = 1, + +	/* Payload contains an ethernet header (struct ethhdr). */  	RX_MSDU_DECAP_ETHERNET2_DIX = 2, + +	/* Payload contains two 48-bit addresses and 2-byte length (14 bytes +	 * total), followed by an RFC1042 header (8 bytes). 
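
To make the layout comment above concrete, here is a hypothetical helper computing the length of item (b), the HW crypto parameter, per cipher; the byte counts come straight from the comment, and the driver itself derives this from the rx descriptor rather than from a helper like this:

/* Illustration only: length in bytes of the HW crypto parameter that
 * follows the 802.11 header in rx_hdr_status. */
enum example_cipher { EX_NONE, EX_WEP, EX_TKIP, EX_AES };

static unsigned int example_crypto_param_len(enum example_cipher c)
{
	switch (c) {
	case EX_WEP:
		return 4;	/* 4 bytes for WEP */
	case EX_TKIP:
	case EX_AES:
		return 8;	/* 8 bytes for TKIP, AES */
	case EX_NONE:
	default:
		return 0;	/* no security */
	}
}
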
*/  	RX_MSDU_DECAP_8023_SNAP_LLC = 3  }; diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h index 85e806bf725..4eb2ecbc06e 100644 --- a/drivers/net/wireless/ath/ath10k/trace.h +++ b/drivers/net/wireless/ath/ath10k/trace.h @@ -111,26 +111,29 @@ TRACE_EVENT(ath10k_log_dbg_dump,  );  TRACE_EVENT(ath10k_wmi_cmd, -	TP_PROTO(int id, void *buf, size_t buf_len), +	TP_PROTO(int id, void *buf, size_t buf_len, int ret), -	TP_ARGS(id, buf, buf_len), +	TP_ARGS(id, buf, buf_len, ret),  	TP_STRUCT__entry(  		__field(unsigned int, id)  		__field(size_t, buf_len)  		__dynamic_array(u8, buf, buf_len) +		__field(int, ret)  	),  	TP_fast_assign(  		__entry->id = id;  		__entry->buf_len = buf_len; +		__entry->ret = ret;  		memcpy(__get_dynamic_array(buf), buf, buf_len);  	),  	TP_printk( -		"id %d len %zu", +		"id %d len %zu ret %d",  		__entry->id, -		__entry->buf_len +		__entry->buf_len, +		__entry->ret  	)  ); @@ -158,6 +161,48 @@ TRACE_EVENT(ath10k_wmi_event,  	)  ); +TRACE_EVENT(ath10k_htt_stats, +	TP_PROTO(void *buf, size_t buf_len), + +	TP_ARGS(buf, buf_len), + +	TP_STRUCT__entry( +		__field(size_t, buf_len) +		__dynamic_array(u8, buf, buf_len) +	), + +	TP_fast_assign( +		__entry->buf_len = buf_len; +		memcpy(__get_dynamic_array(buf), buf, buf_len); +	), + +	TP_printk( +		"len %zu", +		__entry->buf_len +	) +); + +TRACE_EVENT(ath10k_wmi_dbglog, +	TP_PROTO(void *buf, size_t buf_len), + +	TP_ARGS(buf, buf_len), + +	TP_STRUCT__entry( +		__field(size_t, buf_len) +		__dynamic_array(u8, buf, buf_len) +	), + +	TP_fast_assign( +		__entry->buf_len = buf_len; +		memcpy(__get_dynamic_array(buf), buf, buf_len); +	), + +	TP_printk( +		"len %zu", +		__entry->buf_len +	) +); +  #endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/  /* we don't want to use include/trace/events */ diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index 68b6faefd1d..82669a77e55 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -44,40 +44,41 @@ out:  	spin_unlock_bh(&ar->data_lock);  } -void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc) +void ath10k_txrx_tx_unref(struct ath10k_htt *htt, +			  const struct htt_tx_done *tx_done)  {  	struct device *dev = htt->ar->dev;  	struct ieee80211_tx_info *info; -	struct sk_buff *txfrag = ATH10K_SKB_CB(txdesc)->htt.txfrag; -	struct sk_buff *msdu = ATH10K_SKB_CB(txdesc)->htt.msdu; -	int ret; +	struct ath10k_skb_cb *skb_cb; +	struct sk_buff *msdu; -	if (ATH10K_SKB_CB(txdesc)->htt.refcount == 0) -		return; +	lockdep_assert_held(&htt->tx_lock); -	ATH10K_SKB_CB(txdesc)->htt.refcount--; +	ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n", +		   tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack); -	if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0) +	if (tx_done->msdu_id >= htt->max_num_pending_tx) { +		ath10k_warn("warning: msdu_id %d too big, ignoring\n", +			    tx_done->msdu_id);  		return; +	} -	if (txfrag) { -		ret = ath10k_skb_unmap(dev, txfrag); -		if (ret) -			ath10k_warn("txfrag unmap failed (%d)\n", ret); +	msdu = htt->pending_tx[tx_done->msdu_id]; +	skb_cb = ATH10K_SKB_CB(msdu); -		dev_kfree_skb_any(txfrag); -	} +	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); -	ret = ath10k_skb_unmap(dev, msdu); -	if (ret) -		ath10k_warn("data skb unmap failed (%d)\n", ret); +	if (skb_cb->htt.txbuf) +		dma_pool_free(htt->tx_pool, +			      skb_cb->htt.txbuf, +			      skb_cb->htt.txbuf_paddr);  	
ath10k_report_offchan_tx(htt->ar, msdu);  	info = IEEE80211_SKB_CB(msdu);  	memset(&info->status, 0, sizeof(info->status)); -	if (ATH10K_SKB_CB(txdesc)->htt.discard) { +	if (tx_done->discard) {  		ieee80211_free_txskb(htt->ar->hw, msdu);  		goto exit;  	} @@ -85,216 +86,18 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)  	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))  		info->flags |= IEEE80211_TX_STAT_ACK; -	if (ATH10K_SKB_CB(txdesc)->htt.no_ack) +	if (tx_done->no_ack)  		info->flags &= ~IEEE80211_TX_STAT_ACK;  	ieee80211_tx_status(htt->ar->hw, msdu);  	/* we do not own the msdu anymore */  exit: -	spin_lock_bh(&htt->tx_lock); -	htt->pending_tx[ATH10K_SKB_CB(txdesc)->htt.msdu_id] = NULL; -	ath10k_htt_tx_free_msdu_id(htt, ATH10K_SKB_CB(txdesc)->htt.msdu_id); +	htt->pending_tx[tx_done->msdu_id] = NULL; +	ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);  	__ath10k_htt_tx_dec_pending(htt); -	if (bitmap_empty(htt->used_msdu_ids, htt->max_num_pending_tx)) +	if (htt->num_pending_tx == 0)  		wake_up(&htt->empty_tx_wq); -	spin_unlock_bh(&htt->tx_lock); - -	dev_kfree_skb_any(txdesc); -} - -void ath10k_txrx_tx_completed(struct ath10k_htt *htt, -			      const struct htt_tx_done *tx_done) -{ -	struct sk_buff *txdesc; - -	ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n", -		   tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack); - -	if (tx_done->msdu_id >= htt->max_num_pending_tx) { -		ath10k_warn("warning: msdu_id %d too big, ignoring\n", -			    tx_done->msdu_id); -		return; -	} - -	txdesc = htt->pending_tx[tx_done->msdu_id]; - -	ATH10K_SKB_CB(txdesc)->htt.discard = tx_done->discard; -	ATH10K_SKB_CB(txdesc)->htt.no_ack = tx_done->no_ack; - -	ath10k_txrx_tx_unref(htt, txdesc); -} - -static const u8 rx_legacy_rate_idx[] = { -	3,	/* 0x00  - 11Mbps  */ -	2,	/* 0x01  - 5.5Mbps */ -	1,	/* 0x02  - 2Mbps   */ -	0,	/* 0x03  - 1Mbps   */ -	3,	/* 0x04  - 11Mbps  */ -	2,	/* 0x05  - 5.5Mbps */ -	1,	/* 0x06  - 2Mbps   */ -	0,	/* 0x07  - 1Mbps   */ -	10,	/* 0x08  - 48Mbps  */ -	8,	/* 0x09  - 24Mbps  */ -	6,	/* 0x0A  - 12Mbps  */ -	4,	/* 0x0B  - 6Mbps   */ -	11,	/* 0x0C  - 54Mbps  */ -	9,	/* 0x0D  - 36Mbps  */ -	7,	/* 0x0E  - 18Mbps  */ -	5,	/* 0x0F  - 9Mbps   */ -}; - -static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info, -			     enum ieee80211_band band, -			     struct ieee80211_rx_status *status) -{ -	u8 cck, rate, rate_idx, bw, sgi, mcs, nss; -	u8 info0 = info->rate.info0; -	u32 info1 = info->rate.info1; -	u32 info2 = info->rate.info2; -	u8 preamble = 0; - -	/* Check if valid fields */ -	if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID)) -		return; - -	preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE); - -	switch (preamble) { -	case HTT_RX_LEGACY: -		cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK; -		rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE); -		rate_idx = 0; - -		if (rate < 0x08 || rate > 0x0F) -			break; - -		switch (band) { -		case IEEE80211_BAND_2GHZ: -			if (cck) -				rate &= ~BIT(3); -			rate_idx = rx_legacy_rate_idx[rate]; -			break; -		case IEEE80211_BAND_5GHZ: -			rate_idx = rx_legacy_rate_idx[rate]; -			/* We are using same rate table registering -			   HW - ath10k_rates[]. 
In case of 5GHz skip -			   CCK rates, so -4 here */ -			rate_idx -= 4; -			break; -		default: -			break; -		} - -		status->rate_idx = rate_idx; -		break; -	case HTT_RX_HT: -	case HTT_RX_HT_WITH_TXBF: -		/* HT-SIG - Table 20-11 in info1 and info2 */ -		mcs = info1 & 0x1F; -		nss = mcs >> 3; -		bw = (info1 >> 7) & 1; -		sgi = (info2 >> 7) & 1; - -		status->rate_idx = mcs; -		status->flag |= RX_FLAG_HT; -		if (sgi) -			status->flag |= RX_FLAG_SHORT_GI; -		if (bw) -			status->flag |= RX_FLAG_40MHZ; -		break; -	case HTT_RX_VHT: -	case HTT_RX_VHT_WITH_TXBF: -		/* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2 -		   TODO check this */ -		mcs = (info2 >> 4) & 0x0F; -		nss = (info1 >> 10) & 0x07; -		bw = info1 & 3; -		sgi = info2 & 1; - -		status->rate_idx = mcs; -		status->vht_nss = nss; - -		if (sgi) -			status->flag |= RX_FLAG_SHORT_GI; - -		switch (bw) { -		/* 20MHZ */ -		case 0: -			break; -		/* 40MHZ */ -		case 1: -			status->flag |= RX_FLAG_40MHZ; -			break; -		/* 80MHZ */ -		case 2: -			status->flag |= RX_FLAG_80MHZ; -		} - -		status->flag |= RX_FLAG_VHT; -		break; -	default: -		break; -	} -} - -void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info) -{ -	struct ieee80211_rx_status *status; -	struct ieee80211_channel *ch; -	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)info->skb->data; - -	status = IEEE80211_SKB_RXCB(info->skb); -	memset(status, 0, sizeof(*status)); - -	if (info->encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) { -		status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED | -				RX_FLAG_MMIC_STRIPPED; -		hdr->frame_control = __cpu_to_le16( -				__le16_to_cpu(hdr->frame_control) & -				~IEEE80211_FCTL_PROTECTED); -	} - -	if (info->status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) -		status->flag |= RX_FLAG_MMIC_ERROR; - -	if (info->fcs_err) -		status->flag |= RX_FLAG_FAILED_FCS_CRC; - -	status->signal = info->signal; - -	spin_lock_bh(&ar->data_lock); -	ch = ar->scan_channel; -	if (!ch) -		ch = ar->rx_channel; -	spin_unlock_bh(&ar->data_lock); - -	if (!ch) { -		ath10k_warn("no channel configured; ignoring frame!\n"); -		dev_kfree_skb_any(info->skb); -		return; -	} - -	process_rx_rates(ar, info, ch->band, status); -	status->band = ch->band; -	status->freq = ch->center_freq; - -	ath10k_dbg(ATH10K_DBG_DATA, -		   "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u\n", -		   info->skb, -		   info->skb->len, -		   status->flag == 0 ? "legacy" : "", -		   status->flag & RX_FLAG_HT ? "ht" : "", -		   status->flag & RX_FLAG_VHT ? "vht" : "", -		   status->flag & RX_FLAG_40MHZ ? "40" : "", -		   status->flag & RX_FLAG_80MHZ ? "80" : "", -		   status->flag & RX_FLAG_SHORT_GI ? 
"sgi " : "", -		   status->rate_idx, -		   status->vht_nss, -		   status->freq, -		   status->band); - -	ieee80211_rx(ar->hw, info->skb);  }  struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id, @@ -397,7 +200,8 @@ void ath10k_peer_unmap_event(struct ath10k_htt *htt,  	spin_lock_bh(&ar->data_lock);  	peer = ath10k_peer_find_by_id(ar, ev->peer_id);  	if (!peer) { -		ath10k_warn("unknown peer id %d\n", ev->peer_id); +		ath10k_warn("peer-unmap-event: unknown peer id %d\n", +			    ev->peer_id);  		goto exit;  	} diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h index e78632a76df..aee3e20058f 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.h +++ b/drivers/net/wireless/ath/ath10k/txrx.h @@ -19,10 +19,8 @@  #include "htt.h" -void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc); -void ath10k_txrx_tx_completed(struct ath10k_htt *htt, -			      const struct htt_tx_done *tx_done); -void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info); +void ath10k_txrx_tx_unref(struct ath10k_htt *htt, +			  const struct htt_tx_done *tx_done);  struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,  				     const u8 *addr); diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 55f90c76186..4b7782a529a 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -16,6 +16,7 @@   */  #include <linux/skbuff.h> +#include <linux/ctype.h>  #include "core.h"  #include "htc.h" @@ -23,29 +24,468 @@  #include "wmi.h"  #include "mac.h" -void ath10k_wmi_flush_tx(struct ath10k *ar) -{ -	int ret; - -	lockdep_assert_held(&ar->conf_mutex); - -	if (ar->state == ATH10K_STATE_WEDGED) { -		ath10k_warn("wmi flush skipped - device is wedged anyway\n"); -		return; -	} - -	ret = wait_event_timeout(ar->wmi.wq, -				 atomic_read(&ar->wmi.pending_tx_count) == 0, -				 5*HZ); -	if (atomic_read(&ar->wmi.pending_tx_count) == 0) -		return; - -	if (ret == 0) -		ret = -ETIMEDOUT; - -	if (ret < 0) -		ath10k_warn("wmi flush failed (%d)\n", ret); -} +/* MAIN WMI cmd track */ +static struct wmi_cmd_map wmi_cmd_map = { +	.init_cmdid = WMI_INIT_CMDID, +	.start_scan_cmdid = WMI_START_SCAN_CMDID, +	.stop_scan_cmdid = WMI_STOP_SCAN_CMDID, +	.scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID, +	.scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID, +	.pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID, +	.pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID, +	.pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID, +	.pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID, +	.pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID, +	.pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID, +	.pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID, +	.pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID, +	.pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID, +	.pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID, +	.pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID, +	.pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID, +	.pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID, +	.vdev_create_cmdid = WMI_VDEV_CREATE_CMDID, +	.vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID, +	.vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID, +	.vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID, +	.vdev_up_cmdid = WMI_VDEV_UP_CMDID, +	.vdev_stop_cmdid = WMI_VDEV_STOP_CMDID, +	.vdev_down_cmdid = WMI_VDEV_DOWN_CMDID, +	
.vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID, +	.vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID, +	.peer_create_cmdid = WMI_PEER_CREATE_CMDID, +	.peer_delete_cmdid = WMI_PEER_DELETE_CMDID, +	.peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID, +	.peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID, +	.peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID, +	.peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID, +	.peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID, +	.peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID, +	.bcn_tx_cmdid = WMI_BCN_TX_CMDID, +	.pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID, +	.bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID, +	.bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID, +	.prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID, +	.mgmt_tx_cmdid = WMI_MGMT_TX_CMDID, +	.prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID, +	.addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID, +	.addba_send_cmdid = WMI_ADDBA_SEND_CMDID, +	.addba_status_cmdid = WMI_ADDBA_STATUS_CMDID, +	.delba_send_cmdid = WMI_DELBA_SEND_CMDID, +	.addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID, +	.send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID, +	.sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID, +	.sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID, +	.sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID, +	.pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID, +	.pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID, +	.roam_scan_mode = WMI_ROAM_SCAN_MODE, +	.roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD, +	.roam_scan_period = WMI_ROAM_SCAN_PERIOD, +	.roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, +	.roam_ap_profile = WMI_ROAM_AP_PROFILE, +	.ofl_scan_add_ap_profile = WMI_ROAM_AP_PROFILE, +	.ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE, +	.ofl_scan_period = WMI_OFL_SCAN_PERIOD, +	.p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO, +	.p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY, +	.p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE, +	.p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE, +	.p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID, +	.ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID, +	.ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID, +	.peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID, +	.wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID, +	.wlan_profile_set_hist_intvl_cmdid = +				WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID, +	.wlan_profile_get_profile_data_cmdid = +				WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID, +	.wlan_profile_enable_profile_id_cmdid = +				WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID, +	.wlan_profile_list_profile_id_cmdid = +				WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID, +	.pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID, +	.pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID, +	.add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID, +	.rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID, +	.wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID, +	.wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID, +	.wow_enable_disable_wake_event_cmdid = +				WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID, +	.wow_enable_cmdid = WMI_WOW_ENABLE_CMDID, +	.wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID, +	.rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID, +	.rtt_tsf_cmdid = WMI_RTT_TSF_CMDID, +	.vdev_spectral_scan_configure_cmdid = +				WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID, +	.vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID, +	.request_stats_cmdid = 
WMI_REQUEST_STATS_CMDID, +	.set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID, +	.network_list_offload_config_cmdid = +				WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID, +	.gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID, +	.csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID, +	.csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID, +	.chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID, +	.peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID, +	.peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID, +	.sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID, +	.sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID, +	.sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD, +	.echo_cmdid = WMI_ECHO_CMDID, +	.pdev_utf_cmdid = WMI_PDEV_UTF_CMDID, +	.dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID, +	.pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID, +	.pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID, +	.vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID, +	.vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID, +	.force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID, +	.gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID, +	.gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID, +}; + +/* 10.X WMI cmd track */ +static struct wmi_cmd_map wmi_10x_cmd_map = { +	.init_cmdid = WMI_10X_INIT_CMDID, +	.start_scan_cmdid = WMI_10X_START_SCAN_CMDID, +	.stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID, +	.scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID, +	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED, +	.pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID, +	.pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID, +	.pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID, +	.pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID, +	.pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID, +	.pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID, +	.pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID, +	.pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID, +	.pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID, +	.pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID, +	.pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID, +	.pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID, +	.pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID, +	.vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID, +	.vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID, +	.vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID, +	.vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID, +	.vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID, +	.vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID, +	.vdev_down_cmdid = WMI_10X_VDEV_DOWN_CMDID, +	.vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID, +	.vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID, +	.peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID, +	.peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID, +	.peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID, +	.peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID, +	.peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID, +	.peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID, +	.peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID, +	.peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID, +	.bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID, +	.pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID, +	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED, +	.bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID, +	.prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID, +	.mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID, +	
.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED, +	.addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID, +	.addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID, +	.addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID, +	.delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID, +	.addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID, +	.send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID, +	.sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID, +	.sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID, +	.sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID, +	.pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID, +	.pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID, +	.roam_scan_mode = WMI_10X_ROAM_SCAN_MODE, +	.roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD, +	.roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD, +	.roam_scan_rssi_change_threshold = +				WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, +	.roam_ap_profile = WMI_10X_ROAM_AP_PROFILE, +	.ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE, +	.ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE, +	.ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD, +	.p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO, +	.p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY, +	.p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE, +	.p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE, +	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED, +	.ap_ps_peer_param_cmdid = WMI_10X_AP_PS_PEER_PARAM_CMDID, +	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED, +	.peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID, +	.wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID, +	.wlan_profile_set_hist_intvl_cmdid = +				WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID, +	.wlan_profile_get_profile_data_cmdid = +				WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID, +	.wlan_profile_enable_profile_id_cmdid = +				WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID, +	.wlan_profile_list_profile_id_cmdid = +				WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID, +	.pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID, +	.pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID, +	.add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID, +	.rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID, +	.wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID, +	.wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID, +	.wow_enable_disable_wake_event_cmdid = +				WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID, +	.wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID, +	.wow_hostwakeup_from_sleep_cmdid = +				WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID, +	.rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID, +	.rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID, +	.vdev_spectral_scan_configure_cmdid = +				WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID, +	.vdev_spectral_scan_enable_cmdid = +				WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID, +	.request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID, +	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED, +	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED, +	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED, +	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED, +	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED, +	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED, +	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED, +	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED, +	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED, +	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED, +	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED, +	.echo_cmdid = WMI_10X_ECHO_CMDID, +	.pdev_utf_cmdid = 
WMI_10X_PDEV_UTF_CMDID, +	.dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID, +	.pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID, +	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED, +	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED, +	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED, +	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED, +	.gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID, +	.gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID, +}; + +/* MAIN WMI VDEV param map */ +static struct wmi_vdev_param_map wmi_vdev_param_map = { +	.rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD, +	.fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD, +	.beacon_interval = WMI_VDEV_PARAM_BEACON_INTERVAL, +	.listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL, +	.multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE, +	.mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE, +	.slot_time = WMI_VDEV_PARAM_SLOT_TIME, +	.preamble = WMI_VDEV_PARAM_PREAMBLE, +	.swba_time = WMI_VDEV_PARAM_SWBA_TIME, +	.wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD, +	.wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME, +	.wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL, +	.dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD, +	.wmi_vdev_oc_scheduler_air_time_limit = +					WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT, +	.wds = WMI_VDEV_PARAM_WDS, +	.atim_window = WMI_VDEV_PARAM_ATIM_WINDOW, +	.bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX, +	.bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT, +	.bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT, +	.feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM, +	.chwidth = WMI_VDEV_PARAM_CHWIDTH, +	.chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET, +	.disable_htprotection =	WMI_VDEV_PARAM_DISABLE_HTPROTECTION, +	.sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT, +	.mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE, +	.protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE, +	.fixed_rate = WMI_VDEV_PARAM_FIXED_RATE, +	.sgi = WMI_VDEV_PARAM_SGI, +	.ldpc = WMI_VDEV_PARAM_LDPC, +	.tx_stbc = WMI_VDEV_PARAM_TX_STBC, +	.rx_stbc = WMI_VDEV_PARAM_RX_STBC, +	.intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD, +	.def_keyid = WMI_VDEV_PARAM_DEF_KEYID, +	.nss = WMI_VDEV_PARAM_NSS, +	.bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE, +	.mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE, +	.mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE, +	.dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE, +	.unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE, +	.ap_keepalive_min_idle_inactive_time_secs = +			WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS, +	.ap_keepalive_max_idle_inactive_time_secs = +			WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS, +	.ap_keepalive_max_unresponsive_time_secs = +			WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS, +	.ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS, +	.mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED, +	.enable_rtscts = WMI_VDEV_PARAM_ENABLE_RTSCTS, +	.txbf = WMI_VDEV_PARAM_TXBF, +	.packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE, +	.drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY, +	.tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE, +	.ap_detect_out_of_sync_sleeping_sta_time_secs = +					WMI_VDEV_PARAM_UNSUPPORTED, +}; + +/* 10.X WMI VDEV param map */ +static struct wmi_vdev_param_map wmi_10x_vdev_param_map = { +	.rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD, +	.fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD, +	.beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL, +	.listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL, +	.multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE, +	
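/* the same logical parameter carries a different numeric ID on main +	 * and 10.X firmware, so raw WMI_VDEV_PARAM_* values must never be +	 * sent directly; parameters the 10.X firmware lacks are marked +	 * WMI_VDEV_PARAM_UNSUPPORTED further down */ +	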
.mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE, +	.slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME, +	.preamble = WMI_10X_VDEV_PARAM_PREAMBLE, +	.swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME, +	.wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD, +	.wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME, +	.wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL, +	.dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD, +	.wmi_vdev_oc_scheduler_air_time_limit = +				WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT, +	.wds = WMI_10X_VDEV_PARAM_WDS, +	.atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW, +	.bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX, +	.bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED, +	.bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED, +	.feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM, +	.chwidth = WMI_10X_VDEV_PARAM_CHWIDTH, +	.chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET, +	.disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION, +	.sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT, +	.mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE, +	.protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE, +	.fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE, +	.sgi = WMI_10X_VDEV_PARAM_SGI, +	.ldpc = WMI_10X_VDEV_PARAM_LDPC, +	.tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC, +	.rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC, +	.intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD, +	.def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID, +	.nss = WMI_10X_VDEV_PARAM_NSS, +	.bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE, +	.mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE, +	.mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE, +	.dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE, +	.unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE, +	.ap_keepalive_min_idle_inactive_time_secs = +		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS, +	.ap_keepalive_max_idle_inactive_time_secs = +		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS, +	.ap_keepalive_max_unresponsive_time_secs = +		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS, +	.ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS, +	.mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET, +	.enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS, +	.txbf = WMI_VDEV_PARAM_UNSUPPORTED, +	.packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED, +	.drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED, +	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED, +	.ap_detect_out_of_sync_sleeping_sta_time_secs = +		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS, +}; + +static struct wmi_pdev_param_map wmi_pdev_param_map = { +	.tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK, +	.rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK, +	.txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G, +	.txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G, +	.txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE, +	.beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE, +	.beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE, +	.resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE, +	.protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE, +	.dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW, +	.non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH, +	.agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH, +	.sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH, +	.ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING, +	.ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE, +	.ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE, +	.ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK, +	.ltr_ac_latency_vi = 
WMI_PDEV_PARAM_LTR_AC_LATENCY_VI, +	.ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO, +	.ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT, +	.ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE, +	.ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE, +	.ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT, +	.l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE, +	.dsleep_enable = WMI_PDEV_PARAM_DSLEEP_ENABLE, +	.pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH, +	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN, +	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN, +	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE, +	.pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD, +	.vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD, +	.peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD, +	.bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD, +	.pmf_qos = WMI_PDEV_PARAM_PMF_QOS, +	.arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE, +	.dcs = WMI_PDEV_PARAM_DCS, +	.ani_enable = WMI_PDEV_PARAM_ANI_ENABLE, +	.ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD, +	.ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD, +	.ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL, +	.ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL, +	.dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN, +	.proxy_sta = WMI_PDEV_PARAM_PROXY_STA, +	.idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG, +	.power_gating_sleep = WMI_PDEV_PARAM_POWER_GATING_SLEEP, +	.fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED, +	.burst_dur = WMI_PDEV_PARAM_UNSUPPORTED, +	.burst_enable = WMI_PDEV_PARAM_UNSUPPORTED, +}; + +static struct wmi_pdev_param_map wmi_10x_pdev_param_map = { +	.tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK, +	.rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK, +	.txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G, +	.txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G, +	.txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE, +	.beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE, +	.beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE, +	.resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE, +	.protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE, +	.dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW, +	.non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH, +	.agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH, +	.sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH, +	.ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING, +	.ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE, +	.ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE, +	.ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK, +	.ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI, +	.ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO, +	.ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT, +	.ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE, +	.ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE, +	.ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT, +	.l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE, +	.dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE, +	.pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED, +	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED, +	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED, +	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED, +	.pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD, +	.vdev_stats_update_period = 
WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD, +	.peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD, +	.bcnflt_stats_update_period = +				WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD, +	.pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS, +	.arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE, +	.dcs = WMI_10X_PDEV_PARAM_DCS, +	.ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE, +	.ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD, +	.ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD, +	.ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL, +	.ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL, +	.dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN, +	.proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED, +	.idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED, +	.power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED, +	.fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET, +	.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR, +	.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE, +};  int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)  { @@ -85,18 +525,14 @@ static struct sk_buff *ath10k_wmi_alloc_skb(u32 len)  static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)  {  	dev_kfree_skb(skb); - -	if (atomic_sub_return(1, &ar->wmi.pending_tx_count) == 0) -		wake_up(&ar->wmi.wq);  } -/* WMI command API */ -static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, -			       enum wmi_cmd_id cmd_id) +static int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb, +				      u32 cmd_id)  {  	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);  	struct wmi_cmd_hdr *cmd_hdr; -	int status; +	int ret;  	u32 cmd = 0;  	if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL) @@ -107,25 +543,151 @@ static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,  	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;  	cmd_hdr->cmd_id = __cpu_to_le32(cmd); -	if (atomic_add_return(1, &ar->wmi.pending_tx_count) > -	    WMI_MAX_PENDING_TX_COUNT) { -		/* avoid using up memory when FW hangs */ -		atomic_dec(&ar->wmi.pending_tx_count); -		return -EBUSY; +	memset(skb_cb, 0, sizeof(*skb_cb)); +	ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb); +	trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len, ret); + +	if (ret) +		goto err_pull; + +	return 0; + +err_pull: +	skb_pull(skb, sizeof(struct wmi_cmd_hdr)); +	return ret; +} + +static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif) +{ +	int ret; + +	lockdep_assert_held(&arvif->ar->data_lock); + +	if (arvif->beacon == NULL) +		return; + +	if (arvif->beacon_sent) +		return; + +	ret = ath10k_wmi_beacon_send_ref_nowait(arvif); +	if (ret) +		return; + +	/* We need to retain the arvif->beacon reference for DMA unmapping and +	 * freeing the skbuff later. */ +	arvif->beacon_sent = true; +} + +static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac, +				       struct ieee80211_vif *vif) +{ +	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + +	ath10k_wmi_tx_beacon_nowait(arvif); +} + +static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar) +{ +	spin_lock_bh(&ar->data_lock); +	ieee80211_iterate_active_interfaces_atomic(ar->hw, +						   IEEE80211_IFACE_ITER_NORMAL, +						   ath10k_wmi_tx_beacons_iter, +						   NULL); +	spin_unlock_bh(&ar->data_lock); +} + +static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar) +{ +	/* try to send pending beacons first. 
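credits just returned by +	 * the target are offered to pending beacons before WMI senders +	 * blocked in ath10k_wmi_cmd_send() are woken - 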
they take priority */ +	ath10k_wmi_tx_beacons_nowait(ar); + +	wake_up(&ar->wmi.tx_credits_wq); +} + +static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, +			       u32 cmd_id) +{ +	int ret = -EOPNOTSUPP; + +	might_sleep(); + +	if (cmd_id == WMI_CMD_UNSUPPORTED) { +		ath10k_warn("wmi command %d is not supported by firmware\n", +			    cmd_id); +		return ret;  	} -	memset(skb_cb, 0, sizeof(*skb_cb)); +	wait_event_timeout(ar->wmi.tx_credits_wq, ({ +		/* try to send pending beacons first. they take priority */ +		ath10k_wmi_tx_beacons_nowait(ar); -	trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len); +		ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id); +		(ret != -EAGAIN); +	}), 3*HZ); -	status = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb); -	if (status) { +	if (ret)  		dev_kfree_skb_any(skb); -		atomic_dec(&ar->wmi.pending_tx_count); -		return status; + +	return ret; +} + +int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb) +{ +	int ret = 0; +	struct wmi_mgmt_tx_cmd *cmd; +	struct ieee80211_hdr *hdr; +	struct sk_buff *wmi_skb; +	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); +	int len; +	u32 buf_len = skb->len; +	u16 fc; + +	hdr = (struct ieee80211_hdr *)skb->data; +	fc = le16_to_cpu(hdr->frame_control); + +	if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control))) +		return -EINVAL; + +	len = sizeof(cmd->hdr) + skb->len; + +	if ((ieee80211_is_action(hdr->frame_control) || +	     ieee80211_is_deauth(hdr->frame_control) || +	     ieee80211_is_disassoc(hdr->frame_control)) && +	     ieee80211_has_protected(hdr->frame_control)) { +		len += IEEE80211_CCMP_MIC_LEN; +		buf_len += IEEE80211_CCMP_MIC_LEN;  	} -	return 0; +	len = round_up(len, 4); + +	wmi_skb = ath10k_wmi_alloc_skb(len); +	if (!wmi_skb) +		return -ENOMEM; + +	cmd = (struct wmi_mgmt_tx_cmd *)wmi_skb->data; + +	cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(skb)->vdev_id); +	cmd->hdr.tx_rate = 0; +	cmd->hdr.tx_power = 0; +	cmd->hdr.buf_len = __cpu_to_le32(buf_len); + +	memcpy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr), ETH_ALEN); +	memcpy(cmd->buf, skb->data, skb->len); + +	ath10k_dbg(ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n", +		   wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE, +		   fc & IEEE80211_FCTL_STYPE); + +	/* Send the management frame buffer to the target */ +	ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid); +	if (ret) +		return ret; + +	/* TODO: report tx status to mac80211 - temporary just ACK */ +	info->flags |= IEEE80211_TX_STAT_ACK; +	ieee80211_tx_status_irqsafe(ar->hw, skb); + +	return ret;  }  static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb) @@ -315,8 +877,11 @@ static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)  static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)  { -	struct wmi_mgmt_rx_event *event = (struct wmi_mgmt_rx_event *)skb->data; +	struct wmi_mgmt_rx_event_v1 *ev_v1; +	struct wmi_mgmt_rx_event_v2 *ev_v2; +	struct wmi_mgmt_rx_hdr_v1 *ev_hdr;  	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); +	struct ieee80211_channel *ch;  	struct ieee80211_hdr *hdr;  	u32 rx_status;  	u32 channel; @@ -325,19 +890,35 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)  	u32 rate;  	u32 buf_len;  	u16 fc; +	int pull_len; + +	if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) { +		ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data; +		ev_hdr = &ev_v2->hdr.v1; +		pull_len = sizeof(*ev_v2); +	} else { +		ev_v1 = (struct 
wmi_mgmt_rx_event_v1 *)skb->data; +		ev_hdr = &ev_v1->hdr; +		pull_len = sizeof(*ev_v1); +	} -	channel   = __le32_to_cpu(event->hdr.channel); -	buf_len   = __le32_to_cpu(event->hdr.buf_len); -	rx_status = __le32_to_cpu(event->hdr.status); -	snr       = __le32_to_cpu(event->hdr.snr); -	phy_mode  = __le32_to_cpu(event->hdr.phy_mode); -	rate	  = __le32_to_cpu(event->hdr.rate); +	channel   = __le32_to_cpu(ev_hdr->channel); +	buf_len   = __le32_to_cpu(ev_hdr->buf_len); +	rx_status = __le32_to_cpu(ev_hdr->status); +	snr       = __le32_to_cpu(ev_hdr->snr); +	phy_mode  = __le32_to_cpu(ev_hdr->phy_mode); +	rate	  = __le32_to_cpu(ev_hdr->rate);  	memset(status, 0, sizeof(*status));  	ath10k_dbg(ATH10K_DBG_MGMT,  		   "event mgmt rx status %08x\n", rx_status); +	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) { +		dev_kfree_skb(skb); +		return 0; +	} +  	if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) {  		dev_kfree_skb(skb);  		return 0; @@ -353,21 +934,49 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)  	if (rx_status & WMI_RX_STATUS_ERR_MIC)  		status->flag |= RX_FLAG_MMIC_ERROR; -	status->band = phy_mode_to_band(phy_mode); +	/* HW can Rx CCK rates on 5GHz. In that case phy_mode is set to +	 * MODE_11B. This means phy_mode is not a reliable source for the band +	 * of mgmt rx. */ + +	ch = ar->scan_channel; +	if (!ch) +		ch = ar->rx_channel; + +	if (ch) { +		status->band = ch->band; + +		if (phy_mode == MODE_11B && +		    status->band == IEEE80211_BAND_5GHZ) +			ath10k_dbg(ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n"); +	} else { +		ath10k_warn("using (unreliable) phy_mode to extract band for mgmt rx\n"); +		status->band = phy_mode_to_band(phy_mode); +	} +  	status->freq = ieee80211_channel_to_frequency(channel, status->band);  	status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;  	status->rate_idx = get_rate_idx(rate, status->band); -	skb_pull(skb, sizeof(event->hdr)); +	skb_pull(skb, pull_len);  	hdr = (struct ieee80211_hdr *)skb->data;  	fc = le16_to_cpu(hdr->frame_control); -	if (fc & IEEE80211_FCTL_PROTECTED) { -		status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED | -				RX_FLAG_MMIC_STRIPPED; -		hdr->frame_control = __cpu_to_le16(fc & +	/* FW delivers WEP Shared Auth frame with Protected Bit set and +	 * encrypted payload. However in case of PMF it delivers decrypted +	 * frames with Protected Bit set. 
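Hence auth frames are left +	 * untouched below, while other protected frames are marked +	 * RX_FLAG_DECRYPTED and, unless they are action, deauth or +	 * disassoc frames, also have the IV stripped and the Protected +	 * bit cleared. 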
*/ +	if (ieee80211_has_protected(hdr->frame_control) && +	    !ieee80211_is_auth(hdr->frame_control)) { +		status->flag |= RX_FLAG_DECRYPTED; + +		if (!ieee80211_is_action(hdr->frame_control) && +		    !ieee80211_is_deauth(hdr->frame_control) && +		    !ieee80211_is_disassoc(hdr->frame_control)) { +			status->flag |= RX_FLAG_IV_STRIPPED | +					RX_FLAG_MMIC_STRIPPED; +			hdr->frame_control = __cpu_to_le16(fc &  					~IEEE80211_FCTL_PROTECTED); +		}  	}  	ath10k_dbg(ATH10K_DBG_MGMT, @@ -473,9 +1082,14 @@ static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)  	ath10k_dbg(ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");  } -static void ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb) +static int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)  { -	ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_MESG_EVENTID\n"); +	ath10k_dbg(ATH10K_DBG_WMI, "wmi event debug mesg len %d\n", +		   skb->len); + +	trace_ath10k_wmi_dbglog(skb->data, skb->len); + +	return 0;  }  static void ath10k_wmi_event_update_stats(struct ath10k *ar, @@ -513,7 +1127,27 @@ static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar,  static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar,  					      struct sk_buff *skb)  { -	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PEER_STA_KICKOUT_EVENTID\n"); +	struct wmi_peer_sta_kickout_event *ev; +	struct ieee80211_sta *sta; + +	ev = (struct wmi_peer_sta_kickout_event *)skb->data; + +	ath10k_dbg(ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n", +		   ev->peer_macaddr.addr); + +	rcu_read_lock(); + +	sta = ieee80211_find_sta_by_ifaddr(ar->hw, ev->peer_macaddr.addr, NULL); +	if (!sta) { +		ath10k_warn("Spurious quick kickout for STA %pM\n", +			    ev->peer_macaddr.addr); +		goto exit; +	} + +	ieee80211_report_low_ack(sta, 10); + +exit: +	rcu_read_unlock();  }  /* @@ -614,6 +1248,13 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,  	tim->bitmap_ctrl = !!__le32_to_cpu(bcn_info->tim_info.tim_mcast);  	memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len); +	if (tim->dtim_count == 0) { +		ATH10K_SKB_CB(bcn)->bcn.dtim_zero = true; + +		if (__le32_to_cpu(bcn_info->tim_info.tim_mcast) == 1) +			ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true; +	} +  	ath10k_dbg(ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",  		   tim->dtim_count, tim->dtim_period,  		   tim->bitmap_ctrl, pvm_len); @@ -734,18 +1375,13 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)  	int i = -1;  	struct wmi_bcn_info *bcn_info;  	struct ath10k_vif *arvif; -	struct wmi_bcn_tx_arg arg;  	struct sk_buff *bcn; -	int vdev_id = 0; -	int ret; - -	ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n"); +	int ret, vdev_id = 0;  	ev = (struct wmi_host_swba_event *)skb->data;  	map = __le32_to_cpu(ev->vdev_map); -	ath10k_dbg(ATH10K_DBG_MGMT, "host swba:\n" -		   "-vdev map 0x%x\n", +	ath10k_dbg(ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",  		   ev->vdev_map);  	for (; map; map >>= 1, vdev_id++) { @@ -762,12 +1398,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)  		bcn_info = &ev->bcn_info[i];  		ath10k_dbg(ATH10K_DBG_MGMT, -			   "-bcn_info[%d]:\n" -			   "--tim_len %d\n" -			   "--tim_mcast %d\n" -			   "--tim_changed %d\n" -			   "--tim_num_ps_pending %d\n" -			   "--tim_bitmap 0x%08x%08x%08x%08x\n", +			   "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",  			   i,  			   __le32_to_cpu(bcn_info->tim_info.tim_len),  			   __le32_to_cpu(bcn_info->tim_info.tim_mcast), @@ 
-784,6 +1415,17 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)  			continue;  		} +		/* There are no tx completions for beacons, so wait for the +		 * next SWBA before telling mac80211 to decrement the CSA +		 * counter. +		 * +		 * Once the CSA countdown is complete, stop sending beacons +		 * until the actual channel switch is done */ +		if (arvif->vif->csa_active && +		    ieee80211_csa_is_complete(arvif->vif)) { +			ieee80211_csa_finish(arvif->vif); +			continue; +		} +  		bcn = ieee80211_beacon_get(ar->hw, arvif->vif);  		if (!bcn) {  			ath10k_warn("could not get mac80211 beacon\n"); @@ -794,17 +1436,37 @@  		ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info);  		ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info); -		arg.vdev_id = arvif->vdev_id; -		arg.tx_rate = 0; -		arg.tx_power = 0; -		arg.bcn = bcn->data; -		arg.bcn_len = bcn->len; +		spin_lock_bh(&ar->data_lock); -		ret = ath10k_wmi_beacon_send(ar, &arg); -		if (ret) -			ath10k_warn("could not send beacon (%d)\n", ret); +		if (arvif->beacon) { +			if (!arvif->beacon_sent) +				ath10k_warn("SWBA overrun on vdev %d\n", +					    arvif->vdev_id); -		dev_kfree_skb_any(bcn); +			dma_unmap_single(arvif->ar->dev, +					 ATH10K_SKB_CB(arvif->beacon)->paddr, +					 arvif->beacon->len, DMA_TO_DEVICE); +			dev_kfree_skb_any(arvif->beacon); +			arvif->beacon = NULL; +		} + +		ATH10K_SKB_CB(bcn)->paddr = dma_map_single(arvif->ar->dev, +							   bcn->data, bcn->len, +							   DMA_TO_DEVICE); +		ret = dma_mapping_error(arvif->ar->dev, +					ATH10K_SKB_CB(bcn)->paddr); +		if (ret) { +			ath10k_warn("failed to map beacon: %d\n", ret); +			dev_kfree_skb_any(bcn); +			goto skip; +		} + +		arvif->beacon = bcn; +		arvif->beacon_sent = false; + +		ath10k_wmi_tx_beacon_nowait(arvif); +skip: +		spin_unlock_bh(&ar->data_lock);  	}  } @@ -814,9 +1476,259 @@ static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar,  	ath10k_dbg(ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");  } +static void ath10k_dfs_radar_report(struct ath10k *ar, +				    struct wmi_single_phyerr_rx_event *event, +				    struct phyerr_radar_report *rr, +				    u64 tsf) +{ +	u32 reg0, reg1, tsf32l; +	struct pulse_event pe; +	u64 tsf64; +	u8 rssi, width; + +	reg0 = __le32_to_cpu(rr->reg0); +	reg1 = __le32_to_cpu(rr->reg1); + +	ath10k_dbg(ATH10K_DBG_REGULATORY, +		   "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n", +		   MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP), +		   MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH), +		   MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN), +		   MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF)); +	ath10k_dbg(ATH10K_DBG_REGULATORY, +		   "wmi phyerr radar report pulse_delta_peak %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n", +		   MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK), +		   MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX), +		   MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID), +		   MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN), +		   MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK)); +	ath10k_dbg(ATH10K_DBG_REGULATORY, +		   "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n", +		   MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET), +		   MS(reg1, RADAR_REPORT_REG1_PULSE_DUR)); + +	if (!ar->dfs_detector) +		return; + +	/* report event to DFS pattern detector */ +	tsf32l = __le32_to_cpu(event->hdr.tsf_timestamp); +	tsf64 = tsf & (~0xFFFFFFFFULL); +	tsf64 |= tsf32l; + +	width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR); +	rssi = event->hdr.rssi_combined; + +	/* hardware stores this as an 8 bit signed value; +	 * set it to zero if the reported number is negative +	 */ +	if (rssi & 0x80) +		rssi = 0; + +	pe.ts = tsf64; +	pe.freq = ar->hw->conf.chandef.chan->center_freq; +	pe.width = width; +	pe.rssi = rssi; + +	ath10k_dbg(ATH10K_DBG_REGULATORY, +		   "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n", +		   pe.freq, pe.width, pe.rssi, pe.ts); + +	ATH10K_DFS_STAT_INC(ar, pulses_detected); + +	if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe)) { +		ath10k_dbg(ATH10K_DBG_REGULATORY, +			   "dfs no pulse pattern detected, yet\n"); +		return; +	} + +	ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs radar detected\n"); +	ATH10K_DFS_STAT_INC(ar, radar_detected); + +	/* radar event reporting can be suppressed via the debugfs file +	   dfs_block_radar_events */ +	if (ar->dfs_block_radar_events) { +		ath10k_info("DFS Radar detected, but ignored as requested\n"); +		return; +	} + +	ieee80211_radar_detected(ar->hw); +} + +static int ath10k_dfs_fft_report(struct ath10k *ar, +				 struct wmi_single_phyerr_rx_event *event, +				 struct phyerr_fft_report *fftr, +				 u64 tsf) +{ +	u32 reg0, reg1; +	u8 rssi, peak_mag; + +	reg0 = __le32_to_cpu(fftr->reg0); +	reg1 = __le32_to_cpu(fftr->reg1); +	rssi = event->hdr.rssi_combined; + +	ath10k_dbg(ATH10K_DBG_REGULATORY, +		   "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n", +		   MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB), +		   MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB), +		   MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX), +		   MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX)); +	ath10k_dbg(ATH10K_DBG_REGULATORY, +		   "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n", +		   MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB), +		   MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB), +		   MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG), +		   MS(reg1, SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB)); + +	peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG); + +	/* discard events that are likely false radar detections */ +	if (rssi == DFS_RSSI_POSSIBLY_FALSE && +	    peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) { +		ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs false pulse detected\n"); +		ATH10K_DFS_STAT_INC(ar, pulses_discarded); +		return -EINVAL; +	} + +	return 0; +} + +static void ath10k_wmi_event_dfs(struct ath10k *ar, +				 struct wmi_single_phyerr_rx_event *event, +				 u64 tsf) +{ +	int buf_len, tlv_len, res, i = 0; +	struct phyerr_tlv *tlv; +	struct phyerr_radar_report *rr; +	struct phyerr_fft_report *fftr; +	u8 *tlv_buf; + +	buf_len = __le32_to_cpu(event->hdr.buf_len); +	ath10k_dbg(ATH10K_DBG_REGULATORY, +		   "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n", +		   event->hdr.phy_err_code, event->hdr.rssi_combined, +		   __le32_to_cpu(event->hdr.tsf_timestamp), tsf, buf_len); + +	/* Skip event if DFS disabled */ +	if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) +		return; + +	ATH10K_DFS_STAT_INC(ar, pulses_total); + +	while (i < buf_len) { +		if (i + sizeof(*tlv) > buf_len) { +			ath10k_warn("too short buf for tlv header (%d)\n", i); +			return; +		} + +		tlv = (struct phyerr_tlv *)&event->bufp[i]; +		tlv_len = __le16_to_cpu(tlv->len); +		tlv_buf = &event->bufp[i + sizeof(*tlv)]; +		ath10k_dbg(ATH10K_DBG_REGULATORY, +			   "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n", +			   tlv_len, tlv->tag, tlv->sig); + +		switch (tlv->tag) { +		case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY: +			if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) { +				
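/* the whole radar report must lie within the reported +				 * buffer; firmware-supplied lengths are not trusted +				 */ +				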
ath10k_warn("too short radar pulse summary (%d)\n", +					    i); +				return; +			} + +			rr = (struct phyerr_radar_report *)tlv_buf; +			ath10k_dfs_radar_report(ar, event, rr, tsf); +			break; +		case PHYERR_TLV_TAG_SEARCH_FFT_REPORT: +			if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) { +				ath10k_warn("too short fft report (%d)\n", i); +				return; +			} + +			fftr = (struct phyerr_fft_report *)tlv_buf; +			res = ath10k_dfs_fft_report(ar, event, fftr, tsf); +			if (res) +				return; +			break; +		} + +		i += sizeof(*tlv) + tlv_len; +	} +} + +static void ath10k_wmi_event_spectral_scan(struct ath10k *ar, +				struct wmi_single_phyerr_rx_event *event, +				u64 tsf) +{ +	ath10k_dbg(ATH10K_DBG_WMI, "wmi event spectral scan\n"); +} +  static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)  { -	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PHYERR_EVENTID\n"); +	struct wmi_comb_phyerr_rx_event *comb_event; +	struct wmi_single_phyerr_rx_event *event; +	u32 count, i, buf_len, phy_err_code; +	u64 tsf; +	int left_len = skb->len; + +	ATH10K_DFS_STAT_INC(ar, phy_errors); + +	/* Check if combined event available */ +	if (left_len < sizeof(*comb_event)) { +		ath10k_warn("wmi phyerr combined event wrong len\n"); +		return; +	} + +	left_len -= sizeof(*comb_event); + +	/* Check number of included events */ +	comb_event = (struct wmi_comb_phyerr_rx_event *)skb->data; +	count = __le32_to_cpu(comb_event->hdr.num_phyerr_events); + +	tsf = __le32_to_cpu(comb_event->hdr.tsf_u32); +	tsf <<= 32; +	tsf |= __le32_to_cpu(comb_event->hdr.tsf_l32); + +	ath10k_dbg(ATH10K_DBG_WMI, +		   "wmi event phyerr count %d tsf64 0x%llX\n", +		   count, tsf); + +	event = (struct wmi_single_phyerr_rx_event *)comb_event->bufp; +	for (i = 0; i < count; i++) { +		/* Check if we can read event header */ +		if (left_len < sizeof(*event)) { +			ath10k_warn("single event (%d) wrong head len\n", i); +			return; +		} + +		left_len -= sizeof(*event); + +		buf_len = __le32_to_cpu(event->hdr.buf_len); +		phy_err_code = event->hdr.phy_err_code; + +		if (left_len < buf_len) { +			ath10k_warn("single event (%d) wrong buf len\n", i); +			return; +		} + +		left_len -= buf_len; + +		switch (phy_err_code) { +		case PHY_ERROR_RADAR: +			ath10k_wmi_event_dfs(ar, event, tsf); +			break; +		case PHY_ERROR_SPECTRAL_SCAN: +			ath10k_wmi_event_spectral_scan(ar, event, tsf); +			break; +		case PHY_ERROR_FALSE_RADAR_EXT: +			ath10k_wmi_event_dfs(ar, event, tsf); +			ath10k_wmi_event_spectral_scan(ar, event, tsf); +			break; +		default: +			break; +		} + +		event += sizeof(*event) + buf_len; +	}  }  static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb) @@ -831,9 +1743,37 @@ static void ath10k_wmi_event_profile_match(struct ath10k *ar,  }  static void ath10k_wmi_event_debug_print(struct ath10k *ar, -				  struct sk_buff *skb) +					 struct sk_buff *skb)  { -	ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_PRINT_EVENTID\n"); +	char buf[101], c; +	int i; + +	for (i = 0; i < sizeof(buf) - 1; i++) { +		if (i >= skb->len) +			break; + +		c = skb->data[i]; + +		if (c == '\0') +			break; + +		if (isascii(c) && isprint(c)) +			buf[i] = c; +		else +			buf[i] = '.'; +	} + +	if (i == sizeof(buf) - 1) +		ath10k_warn("wmi debug print truncated: %d\n", skb->len); + +	/* for some reason the debug prints end with \n, remove that */ +	if (skb->data[i - 1] == '\n') +		i--; + +	/* the last byte is always reserved for the null character */ +	buf[i] = '\0'; + +	ath10k_dbg(ATH10K_DBG_WMI, "wmi event debug print '%s'\n", buf);  }  static void 
ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb) @@ -919,6 +1859,55 @@ static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,  	ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");  } +static void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, +					     struct sk_buff *skb) +{ +	ath10k_dbg(ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n"); +} + +static void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, +					      struct sk_buff *skb) +{ +	ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n"); +} + +static void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, +					     struct sk_buff *skb) +{ +	ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n"); +} + +static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id, +				      u32 num_units, u32 unit_len) +{ +	dma_addr_t paddr; +	u32 pool_size; +	int idx = ar->wmi.num_mem_chunks; + +	pool_size = num_units * round_up(unit_len, 4); + +	if (!pool_size) +		return -EINVAL; + +	ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev, +							   pool_size, +							   &paddr, +							   GFP_ATOMIC); +	if (!ar->wmi.mem_chunks[idx].vaddr) { +		ath10k_warn("failed to allocate memory chunk\n"); +		return -ENOMEM; +	} + +	memset(ar->wmi.mem_chunks[idx].vaddr, 0, pool_size); + +	ar->wmi.mem_chunks[idx].paddr = paddr; +	ar->wmi.mem_chunks[idx].len = pool_size; +	ar->wmi.mem_chunks[idx].req_id = req_id; +	ar->wmi.num_mem_chunks++; + +	return 0; +} +  static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,  					      struct sk_buff *skb)  { @@ -943,6 +1932,10 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,  	ar->phy_capability = __le32_to_cpu(ev->phy_capability);  	ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains); +	/* only manually set fw features when not using FW IE format */ +	if (ar->fw_api == 1 && ar->fw_version_build > 636) +		set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features); +  	if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {  		ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",  			    ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM); @@ -987,6 +1980,108 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,  	complete(&ar->wmi.service_ready);  } +static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar, +						  struct sk_buff *skb) +{ +	u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i; +	int ret; +	struct wmi_service_ready_event_10x *ev = (void *)skb->data; + +	if (skb->len < sizeof(*ev)) { +		ath10k_warn("Service ready event was %d B but expected %zu B. 
Wrong firmware version?\n", +			    skb->len, sizeof(*ev)); +		return; +	} + +	ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power); +	ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power); +	ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info); +	ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info); +	ar->fw_version_major = +		(__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24; +	ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff); +	ar->phy_capability = __le32_to_cpu(ev->phy_capability); +	ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains); + +	if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) { +		ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n", +			    ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM); +		ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM; +	} + +	ar->ath_common.regulatory.current_rd = +		__le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd); + +	ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap, +				      sizeof(ev->wmi_service_bitmap)); + +	if (strlen(ar->hw->wiphy->fw_version) == 0) { +		snprintf(ar->hw->wiphy->fw_version, +			 sizeof(ar->hw->wiphy->fw_version), +			 "%u.%u", +			 ar->fw_version_major, +			 ar->fw_version_minor); +	} + +	num_mem_reqs = __le32_to_cpu(ev->num_mem_reqs); + +	if (num_mem_reqs > ATH10K_MAX_MEM_REQS) { +		ath10k_warn("requested memory chunks number (%d) exceeds the limit\n", +			    num_mem_reqs); +		return; +	} + +	if (!num_mem_reqs) +		goto exit; + +	ath10k_dbg(ATH10K_DBG_WMI, "firmware has requested %d memory chunks\n", +		   num_mem_reqs); + +	for (i = 0; i < num_mem_reqs; ++i) { +		req_id = __le32_to_cpu(ev->mem_reqs[i].req_id); +		num_units = __le32_to_cpu(ev->mem_reqs[i].num_units); +		unit_size = __le32_to_cpu(ev->mem_reqs[i].unit_size); +		num_unit_info = __le32_to_cpu(ev->mem_reqs[i].num_unit_info); + +		if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) +			/* number of units to allocate is number of +			 * peers, 1 extra for self peer on target */ +			/* this needs to be tied, host and target +			 * can get out of sync */ +			num_units = TARGET_10X_NUM_PEERS + 1; +		else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) +			num_units = TARGET_10X_NUM_VDEVS + 1; + +		ath10k_dbg(ATH10K_DBG_WMI, +			   "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n", +			   req_id, +			   __le32_to_cpu(ev->mem_reqs[i].num_units), +			   num_unit_info, +			   unit_size, +			   num_units); + +		ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units, +						unit_size); +		if (ret) +			return; +	} + +exit: +	ath10k_dbg(ATH10K_DBG_WMI, +		   "wmi event service ready sw_ver 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n", +		   __le32_to_cpu(ev->sw_version), +		   __le32_to_cpu(ev->abi_version), +		   __le32_to_cpu(ev->phy_capability), +		   __le32_to_cpu(ev->ht_cap_info), +		   __le32_to_cpu(ev->vht_cap_info), +		   __le32_to_cpu(ev->vht_supp_mcs), +		   __le32_to_cpu(ev->sys_cap_info), +		   __le32_to_cpu(ev->num_mem_reqs), +		   __le32_to_cpu(ev->num_rf_chains)); + +	complete(&ar->wmi.service_ready); +} +  static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)  {  	struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data; @@ -997,17 +2092,17 @@ static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)  	memcpy(ar->mac_addr, ev->mac_addr.addr, ETH_ALEN);  	ath10k_dbg(ATH10K_DBG_WMI, -		   "wmi event ready sw_version %u abi_version %u mac_addr %pM 
status %d\n", +		   "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d skb->len %i ev-sz %zu\n",  		   __le32_to_cpu(ev->sw_version),  		   __le32_to_cpu(ev->abi_version),  		   ev->mac_addr.addr, -		   __le32_to_cpu(ev->status)); +		   __le32_to_cpu(ev->status), skb->len, sizeof(*ev));  	complete(&ar->wmi.unified_ready);  	return 0;  } -static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb) +static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb)  {  	struct wmi_cmd_hdr *cmd_hdr;  	enum wmi_event_id id; @@ -1126,67 +2221,161 @@ static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb)  	dev_kfree_skb(skb);  } -static void ath10k_wmi_event_work(struct work_struct *work) +static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb)  { -	struct ath10k *ar = container_of(work, struct ath10k, -					 wmi.wmi_event_work); -	struct sk_buff *skb; +	struct wmi_cmd_hdr *cmd_hdr; +	enum wmi_10x_event_id id; +	u16 len; -	for (;;) { -		skb = skb_dequeue(&ar->wmi.wmi_event_list); -		if (!skb) -			break; +	cmd_hdr = (struct wmi_cmd_hdr *)skb->data; +	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); -		ath10k_wmi_event_process(ar, skb); -	} -} +	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL) +		return; -static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb) -{ -	struct wmi_cmd_hdr *cmd_hdr = (struct wmi_cmd_hdr *)skb->data; -	enum wmi_event_id event_id; +	len = skb->len; -	event_id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); +	trace_ath10k_wmi_event(id, skb->data, skb->len); -	/* some events require to be handled ASAP -	 * thus can't be defered to a worker thread */ -	switch (event_id) { -	case WMI_HOST_SWBA_EVENTID: -	case WMI_MGMT_RX_EVENTID: -		ath10k_wmi_event_process(ar, skb); +	switch (id) { +	case WMI_10X_MGMT_RX_EVENTID: +		ath10k_wmi_event_mgmt_rx(ar, skb); +		/* mgmt_rx() owns the skb now! 
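every other case breaks +		 * out and hits the dev_kfree_skb() at the bottom of this +		 * function, so only mgmt rx may return early. 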
*/  		return; +	case WMI_10X_SCAN_EVENTID: +		ath10k_wmi_event_scan(ar, skb); +		break; +	case WMI_10X_CHAN_INFO_EVENTID: +		ath10k_wmi_event_chan_info(ar, skb); +		break; +	case WMI_10X_ECHO_EVENTID: +		ath10k_wmi_event_echo(ar, skb); +		break; +	case WMI_10X_DEBUG_MESG_EVENTID: +		ath10k_wmi_event_debug_mesg(ar, skb); +		break; +	case WMI_10X_UPDATE_STATS_EVENTID: +		ath10k_wmi_event_update_stats(ar, skb); +		break; +	case WMI_10X_VDEV_START_RESP_EVENTID: +		ath10k_wmi_event_vdev_start_resp(ar, skb); +		break; +	case WMI_10X_VDEV_STOPPED_EVENTID: +		ath10k_wmi_event_vdev_stopped(ar, skb); +		break; +	case WMI_10X_PEER_STA_KICKOUT_EVENTID: +		ath10k_wmi_event_peer_sta_kickout(ar, skb); +		break; +	case WMI_10X_HOST_SWBA_EVENTID: +		ath10k_wmi_event_host_swba(ar, skb); +		break; +	case WMI_10X_TBTTOFFSET_UPDATE_EVENTID: +		ath10k_wmi_event_tbttoffset_update(ar, skb); +		break; +	case WMI_10X_PHYERR_EVENTID: +		ath10k_wmi_event_phyerr(ar, skb); +		break; +	case WMI_10X_ROAM_EVENTID: +		ath10k_wmi_event_roam(ar, skb); +		break; +	case WMI_10X_PROFILE_MATCH: +		ath10k_wmi_event_profile_match(ar, skb); +		break; +	case WMI_10X_DEBUG_PRINT_EVENTID: +		ath10k_wmi_event_debug_print(ar, skb); +		break; +	case WMI_10X_PDEV_QVIT_EVENTID: +		ath10k_wmi_event_pdev_qvit(ar, skb); +		break; +	case WMI_10X_WLAN_PROFILE_DATA_EVENTID: +		ath10k_wmi_event_wlan_profile_data(ar, skb); +		break; +	case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID: +		ath10k_wmi_event_rtt_measurement_report(ar, skb); +		break; +	case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID: +		ath10k_wmi_event_tsf_measurement_report(ar, skb); +		break; +	case WMI_10X_RTT_ERROR_REPORT_EVENTID: +		ath10k_wmi_event_rtt_error_report(ar, skb); +		break; +	case WMI_10X_WOW_WAKEUP_HOST_EVENTID: +		ath10k_wmi_event_wow_wakeup_host(ar, skb); +		break; +	case WMI_10X_DCS_INTERFERENCE_EVENTID: +		ath10k_wmi_event_dcs_interference(ar, skb); +		break; +	case WMI_10X_PDEV_TPC_CONFIG_EVENTID: +		ath10k_wmi_event_pdev_tpc_config(ar, skb); +		break; +	case WMI_10X_INST_RSSI_STATS_EVENTID: +		ath10k_wmi_event_inst_rssi_stats(ar, skb); +		break; +	case WMI_10X_VDEV_STANDBY_REQ_EVENTID: +		ath10k_wmi_event_vdev_standby_req(ar, skb); +		break; +	case WMI_10X_VDEV_RESUME_REQ_EVENTID: +		ath10k_wmi_event_vdev_resume_req(ar, skb); +		break; +	case WMI_10X_SERVICE_READY_EVENTID: +		ath10k_wmi_10x_service_ready_event_rx(ar, skb); +		break; +	case WMI_10X_READY_EVENTID: +		ath10k_wmi_ready_event_rx(ar, skb); +		break;  	default: +		ath10k_warn("Unknown eventid: %d\n", id);  		break;  	} -	skb_queue_tail(&ar->wmi.wmi_event_list, skb); -	queue_work(ar->workqueue, &ar->wmi.wmi_event_work); +	dev_kfree_skb(skb); +} + + +static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb) +{ +	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) +		ath10k_wmi_10x_process_rx(ar, skb); +	else +		ath10k_wmi_main_process_rx(ar, skb);  }  /* WMI Initialization functions */  int ath10k_wmi_attach(struct ath10k *ar)  { +	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { +		ar->wmi.cmd = &wmi_10x_cmd_map; +		ar->wmi.vdev_param = &wmi_10x_vdev_param_map; +		ar->wmi.pdev_param = &wmi_10x_pdev_param_map; +	} else { +		ar->wmi.cmd = &wmi_cmd_map; +		ar->wmi.vdev_param = &wmi_vdev_param_map; +		ar->wmi.pdev_param = &wmi_pdev_param_map; +	} +  	init_completion(&ar->wmi.service_ready);  	init_completion(&ar->wmi.unified_ready); -	init_waitqueue_head(&ar->wmi.wq); - -	skb_queue_head_init(&ar->wmi.wmi_event_list); -	INIT_WORK(&ar->wmi.wmi_event_work, ath10k_wmi_event_work); +	
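/* the deferred event worker is gone; senders now block directly in +	 * ath10k_wmi_cmd_send() and are woken from the HTC ep_tx_credits +	 * callback once the target returns credits */ +	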
init_waitqueue_head(&ar->wmi.tx_credits_wq);  	return 0;  }  void ath10k_wmi_detach(struct ath10k *ar)  { -	/* HTC should've drained the packets already */ -	if (WARN_ON(atomic_read(&ar->wmi.pending_tx_count) > 0)) -		ath10k_warn("there are still pending packets\n"); +	int i; + +	/* free the host memory chunks requested by firmware */ +	for (i = 0; i < ar->wmi.num_mem_chunks; i++) { +		dma_free_coherent(ar->dev, +				  ar->wmi.mem_chunks[i].len, +				  ar->wmi.mem_chunks[i].vaddr, +				  ar->wmi.mem_chunks[i].paddr); +	} -	cancel_work_sync(&ar->wmi.wmi_event_work); -	skb_queue_purge(&ar->wmi.wmi_event_list); +	ar->wmi.num_mem_chunks = 0;  } -int ath10k_wmi_connect_htc_service(struct ath10k *ar) +int ath10k_wmi_connect(struct ath10k *ar)  {  	int status;  	struct ath10k_htc_svc_conn_req conn_req; @@ -1198,6 +2387,7 @@ int ath10k_wmi_connect_htc_service(struct ath10k *ar)  	/* these fields are the same for all service endpoints */  	conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;  	conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx; +	conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits;  	/* connect to control service */  	conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL; @@ -1213,8 +2403,9 @@ int ath10k_wmi_connect_htc_service(struct ath10k *ar)  	return 0;  } -int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, -				  u16 rd5g, u16 ctl2g, u16 ctl5g) +static int ath10k_wmi_main_pdev_set_regdomain(struct ath10k *ar, u16 rd, +					      u16 rd2g, u16 rd5g, u16 ctl2g, +					      u16 ctl5g)  {  	struct wmi_pdev_set_regdomain_cmd *cmd;  	struct sk_buff *skb; @@ -1234,7 +2425,48 @@ int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,  		   "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",  		   rd, rd2g, rd5g, ctl2g, ctl5g); -	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_REGDOMAIN_CMDID); +	return ath10k_wmi_cmd_send(ar, skb, +				   ar->wmi.cmd->pdev_set_regdomain_cmdid); +} + +static int ath10k_wmi_10x_pdev_set_regdomain(struct ath10k *ar, u16 rd, +					     u16 rd2g, u16 rd5g, +					     u16 ctl2g, u16 ctl5g, +					     enum wmi_dfs_region dfs_reg) +{ +	struct wmi_pdev_set_regdomain_cmd_10x *cmd; +	struct sk_buff *skb; + +	skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); +	if (!skb) +		return -ENOMEM; + +	cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data; +	cmd->reg_domain = __cpu_to_le32(rd); +	cmd->reg_domain_2G = __cpu_to_le32(rd2g); +	cmd->reg_domain_5G = __cpu_to_le32(rd5g); +	cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g); +	cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g); +	cmd->dfs_domain = __cpu_to_le32(dfs_reg); + +	ath10k_dbg(ATH10K_DBG_WMI, +		   "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n", +		   rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg); + +	return ath10k_wmi_cmd_send(ar, skb, +				   ar->wmi.cmd->pdev_set_regdomain_cmdid); +} + +int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, +				  u16 rd5g, u16 ctl2g, u16 ctl5g, +				  enum wmi_dfs_region dfs_reg) +{ +	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) +		return ath10k_wmi_10x_pdev_set_regdomain(ar, rd, rd2g, rd5g, +							ctl2g, ctl5g, dfs_reg); +	else +		return ath10k_wmi_main_pdev_set_regdomain(ar, rd, rd2g, rd5g, +							 ctl2g, ctl5g);  }  int ath10k_wmi_pdev_set_channel(struct ath10k *ar, @@ -1242,6 +2474,7 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar,  {  	struct wmi_set_channel_cmd *cmd;  	struct sk_buff *skb; +	u32 ch_flags = 0;  	if (arg->passive)  		
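/* presumably only an active operating channel makes sense for this +	 * command; passive (scan-only) channels are rejected outright */ +		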
return -EINVAL; @@ -1250,10 +2483,14 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar,  	if (!skb)  		return -ENOMEM; +	if (arg->chan_radar) +		ch_flags |= WMI_CHAN_FLAG_DFS; +  	cmd = (struct wmi_set_channel_cmd *)skb->data;  	cmd->chan.mhz               = __cpu_to_le32(arg->freq);  	cmd->chan.band_center_freq1 = __cpu_to_le32(arg->freq);  	cmd->chan.mode              = arg->mode; +	cmd->chan.flags		   |= __cpu_to_le32(ch_flags);  	cmd->chan.min_power         = arg->min_power;  	cmd->chan.max_power         = arg->max_power;  	cmd->chan.reg_power         = arg->max_reg_power; @@ -1264,10 +2501,11 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar,  		   "wmi set channel mode %d freq %d\n",  		   arg->mode, arg->freq); -	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_CHANNEL_CMDID); +	return ath10k_wmi_cmd_send(ar, skb, +				   ar->wmi.cmd->pdev_set_channel_cmdid);  } -int ath10k_wmi_pdev_suspend_target(struct ath10k *ar) +int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)  {  	struct wmi_pdev_suspend_cmd *cmd;  	struct sk_buff *skb; @@ -1277,9 +2515,9 @@ int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)  		return -ENOMEM;  	cmd = (struct wmi_pdev_suspend_cmd *)skb->data; -	cmd->suspend_opt = WMI_PDEV_SUSPEND; +	cmd->suspend_opt = __cpu_to_le32(suspend_opt); -	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SUSPEND_CMDID); +	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);  }  int ath10k_wmi_pdev_resume_target(struct ath10k *ar) @@ -1290,15 +2528,19 @@ int ath10k_wmi_pdev_resume_target(struct ath10k *ar)  	if (skb == NULL)  		return -ENOMEM; -	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_RESUME_CMDID); +	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);  } -int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id, -			      u32 value) +int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)  {  	struct wmi_pdev_set_param_cmd *cmd;  	struct sk_buff *skb; +	if (id == WMI_PDEV_PARAM_UNSUPPORTED) { +		ath10k_warn("pdev param %d not supported by firmware\n", id); +		return -EOPNOTSUPP; +	} +  	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));  	if (!skb)  		return -ENOMEM; @@ -1309,15 +2551,16 @@ int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,  	ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",  		   id, value); -	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_PARAM_CMDID); +	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);  } -int ath10k_wmi_cmd_init(struct ath10k *ar) +static int ath10k_wmi_main_cmd_init(struct ath10k *ar)  {  	struct wmi_init_cmd *cmd;  	struct sk_buff *buf;  	struct wmi_resource_config config = {}; -	u32 val; +	u32 len, val; +	int i;  	config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);  	config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS); @@ -1370,23 +2613,158 @@ int ath10k_wmi_cmd_init(struct ath10k *ar)  	config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);  	config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES); -	buf = ath10k_wmi_alloc_skb(sizeof(*cmd)); +	len = sizeof(*cmd) + +	      (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks); + +	buf = ath10k_wmi_alloc_skb(len);  	if (!buf)  		return -ENOMEM;  	cmd = (struct wmi_init_cmd *)buf->data; -	cmd->num_host_mem_chunks = 0; + +	if (ar->wmi.num_mem_chunks == 0) { +		cmd->num_host_mem_chunks = 0; +		goto out; +	} + +	ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", +		   ar->wmi.num_mem_chunks); + +	
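/* these chunks were allocated with dma_alloc_coherent() when the +	 * service ready event listed the target's memory requests; only +	 * their DMA addresses, sizes and request ids are handed over here */ +	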
cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); + +	for (i = 0; i < ar->wmi.num_mem_chunks; i++) { +		cmd->host_mem_chunks[i].ptr = +			__cpu_to_le32(ar->wmi.mem_chunks[i].paddr); +		cmd->host_mem_chunks[i].size = +			__cpu_to_le32(ar->wmi.mem_chunks[i].len); +		cmd->host_mem_chunks[i].req_id = +			__cpu_to_le32(ar->wmi.mem_chunks[i].req_id); + +		ath10k_dbg(ATH10K_DBG_WMI, +			   "wmi chunk %d len %d requested, addr 0x%llx\n", +			   i, +			   ar->wmi.mem_chunks[i].len, +			   (unsigned long long)ar->wmi.mem_chunks[i].paddr); +	} +out:  	memcpy(&cmd->resource_config, &config, sizeof(config));  	ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n"); -	return ath10k_wmi_cmd_send(ar, buf, WMI_INIT_CMDID); +	return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid); +} + +static int ath10k_wmi_10x_cmd_init(struct ath10k *ar) +{ +	struct wmi_init_cmd_10x *cmd; +	struct sk_buff *buf; +	struct wmi_resource_config_10x config = {}; +	u32 len, val; +	int i; + +	config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS); +	config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS); +	config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS); +	config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS); +	config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT); +	config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK); +	config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK); +	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); +	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); +	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); +	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI); +	config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE); + +	config.scan_max_pending_reqs = +		__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS); + +	config.bmiss_offload_max_vdev = +		__cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV); + +	config.roam_offload_max_vdev = +		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV); + +	config.roam_offload_max_ap_profiles = +		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES); + +	config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS); +	config.num_mcast_table_elems = +		__cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS); + +	config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE); +	config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE); +	config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES); +	config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE); +	config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM); + +	val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK; +	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val); + +	config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG); + +	config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC); +	config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES); + +	len = sizeof(*cmd) + +	      (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks); + +	buf = ath10k_wmi_alloc_skb(len); +	if (!buf) +		return -ENOMEM; + +	cmd = (struct wmi_init_cmd_10x *)buf->data; + +	if (ar->wmi.num_mem_chunks == 0) { +		cmd->num_host_mem_chunks = 0; +		goto out; +	} + +	ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", +		   ar->wmi.num_mem_chunks); + +	cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); + +	for (i = 0; i < ar->wmi.num_mem_chunks; i++) { +		cmd->host_mem_chunks[i].ptr = +			
__cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
+		cmd->host_mem_chunks[i].size =
+			__cpu_to_le32(ar->wmi.mem_chunks[i].len);
+		cmd->host_mem_chunks[i].req_id =
+			__cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
+
+		ath10k_dbg(ATH10K_DBG_WMI,
+			   "wmi chunk %d len %d requested, addr 0x%llx\n",
+			   i,
+			   ar->wmi.mem_chunks[i].len,
+			   (unsigned long long)ar->wmi.mem_chunks[i].paddr);
+	}
+out:
+	memcpy(&cmd->resource_config, &config, sizeof(config));
+
+	ath10k_dbg(ATH10K_DBG_WMI, "wmi init 10x\n");
+	return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
 }
 
-static int ath10k_wmi_start_scan_calc_len(const struct wmi_start_scan_arg *arg)
+int ath10k_wmi_cmd_init(struct ath10k *ar)
+{
+	int ret;
+
+	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+		ret = ath10k_wmi_10x_cmd_init(ar);
+	else
+		ret = ath10k_wmi_main_cmd_init(ar);
+
+	return ret;
+}
+
+static int ath10k_wmi_start_scan_calc_len(struct ath10k *ar,
+					  const struct wmi_start_scan_arg *arg)
 {
 	int len;
 
-	len = sizeof(struct wmi_start_scan_cmd);
+	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+		len = sizeof(struct wmi_start_scan_cmd_10x);
+	else
+		len = sizeof(struct wmi_start_scan_cmd);
 
 	if (arg->ie_len) {
 		if (!arg->ie)
@@ -1446,7 +2824,7 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
 	int len = 0;
 	int i;
 
-	len = ath10k_wmi_start_scan_calc_len(arg);
+	len = ath10k_wmi_start_scan_calc_len(ar, arg);
 	if (len < 0)
 		return len; /* len contains error code here */
 
@@ -1478,7 +2856,14 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
 	cmd->scan_ctrl_flags    = __cpu_to_le32(arg->scan_ctrl_flags);
 
 	/* TLV list starts after fields included in the struct */
-	off = sizeof(*cmd);
+	/* There is just one field that differs between the two start_scan
+	 * structures - burst_duration, which we are not using anyway.
+	 * No point in splitting the command here; just shift the buffer
+	 * offset to fit the given firmware. */
+	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+		off = sizeof(struct wmi_start_scan_cmd_10x);
+	else
+		off = sizeof(struct wmi_start_scan_cmd);
 
 	if (arg->n_channels) {
 		channels = (void *)skb->data + off;
@@ -1540,7 +2925,7 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
 	}
 
 	ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n");
-	return ath10k_wmi_cmd_send(ar, skb, WMI_START_SCAN_CMDID);
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
 }
 
 void ath10k_wmi_start_scan_init(struct ath10k *ar,
@@ -1556,7 +2941,7 @@ void ath10k_wmi_start_scan_init(struct ath10k *ar,
 	arg->repeat_probe_time = 0;
 	arg->probe_spacing_time = 0;
 	arg->idle_time = 0;
-	arg->max_scan_time = 5000;
+	arg->max_scan_time = 20000;
 	arg->probe_delay = 5;
 	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
 		| WMI_SCAN_EVENT_COMPLETED
@@ -1600,7 +2985,7 @@ int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
 	ath10k_dbg(ATH10K_DBG_WMI,
 		   "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
 		   arg->req_id, arg->req_type, arg->u.scan_id);
-	return ath10k_wmi_cmd_send(ar, skb, WMI_STOP_SCAN_CMDID);
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
 }
 
 int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
@@ -1625,7 +3010,7 @@ int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
 		   "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
 		   vdev_id, type, subtype, macaddr);
 
-	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_CREATE_CMDID);
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
 }
 
 int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id) @@ -1643,20 +3028,21 @@ int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)  	ath10k_dbg(ATH10K_DBG_WMI,  		   "WMI vdev delete id %d\n", vdev_id); -	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DELETE_CMDID); +	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);  }  static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,  				const struct wmi_vdev_start_request_arg *arg, -				enum wmi_cmd_id cmd_id) +				u32 cmd_id)  {  	struct wmi_vdev_start_request_cmd *cmd;  	struct sk_buff *skb;  	const char *cmdname;  	u32 flags = 0; +	u32 ch_flags = 0; -	if (cmd_id != WMI_VDEV_START_REQUEST_CMDID && -	    cmd_id != WMI_VDEV_RESTART_REQUEST_CMDID) +	if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid && +	    cmd_id != ar->wmi.cmd->vdev_restart_request_cmdid)  		return -EINVAL;  	if (WARN_ON(arg->ssid && arg->ssid_len == 0))  		return -EINVAL; @@ -1665,9 +3051,9 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,  	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))  		return -EINVAL; -	if (cmd_id == WMI_VDEV_START_REQUEST_CMDID) +	if (cmd_id == ar->wmi.cmd->vdev_start_request_cmdid)  		cmdname = "start"; -	else if (cmd_id == WMI_VDEV_RESTART_REQUEST_CMDID) +	else if (cmd_id == ar->wmi.cmd->vdev_restart_request_cmdid)  		cmdname = "restart";  	else  		return -EINVAL; /* should not happen, we already check cmd_id */ @@ -1680,6 +3066,8 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,  		flags |= WMI_VDEV_START_HIDDEN_SSID;  	if (arg->pmf_enabled)  		flags |= WMI_VDEV_START_PMF_ENABLED; +	if (arg->channel.chan_radar) +		ch_flags |= WMI_CHAN_FLAG_DFS;  	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;  	cmd->vdev_id         = __cpu_to_le32(arg->vdev_id); @@ -1701,6 +3089,7 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,  		__cpu_to_le32(arg->channel.band_center_freq1);  	cmd->chan.mode = arg->channel.mode; +	cmd->chan.flags |= __cpu_to_le32(ch_flags);  	cmd->chan.min_power = arg->channel.min_power;  	cmd->chan.max_power = arg->channel.max_power;  	cmd->chan.reg_power = arg->channel.max_reg_power; @@ -1708,9 +3097,10 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,  	cmd->chan.antenna_max = arg->channel.max_antenna_gain;  	ath10k_dbg(ATH10K_DBG_WMI, -		   "wmi vdev %s id 0x%x freq %d, mode %d, ch_flags: 0x%0X," -		   "max_power: %d\n", cmdname, arg->vdev_id, arg->channel.freq, -		   arg->channel.mode, flags, arg->channel.max_power); +		   "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, " +		   "ch_flags: 0x%0X, max_power: %d\n", cmdname, arg->vdev_id, +		   flags, arg->channel.freq, arg->channel.mode, +		   cmd->chan.flags, arg->channel.max_power);  	return ath10k_wmi_cmd_send(ar, skb, cmd_id);  } @@ -1718,15 +3108,17 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,  int ath10k_wmi_vdev_start(struct ath10k *ar,  			  const struct wmi_vdev_start_request_arg *arg)  { -	return ath10k_wmi_vdev_start_restart(ar, arg, -					     WMI_VDEV_START_REQUEST_CMDID); +	u32 cmd_id = ar->wmi.cmd->vdev_start_request_cmdid; + +	return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);  }  int ath10k_wmi_vdev_restart(struct ath10k *ar,  		     const struct wmi_vdev_start_request_arg *arg)  { -	return ath10k_wmi_vdev_start_restart(ar, arg, -					     WMI_VDEV_RESTART_REQUEST_CMDID); +	u32 cmd_id = ar->wmi.cmd->vdev_restart_request_cmdid; + +	return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);  }  int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id) @@ -1743,7 
+3135,7 @@ int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)  	ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id); -	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_STOP_CMDID); +	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);  }  int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid) @@ -1758,13 +3150,13 @@ int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)  	cmd = (struct wmi_vdev_up_cmd *)skb->data;  	cmd->vdev_id       = __cpu_to_le32(vdev_id);  	cmd->vdev_assoc_id = __cpu_to_le32(aid); -	memcpy(&cmd->vdev_bssid.addr, bssid, 6); +	memcpy(&cmd->vdev_bssid.addr, bssid, ETH_ALEN);  	ath10k_dbg(ATH10K_DBG_WMI,  		   "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",  		   vdev_id, aid, bssid); -	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_UP_CMDID); +	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);  }  int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id) @@ -1782,15 +3174,22 @@ int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)  	ath10k_dbg(ATH10K_DBG_WMI,  		   "wmi mgmt vdev down id 0x%x\n", vdev_id); -	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DOWN_CMDID); +	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);  }  int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, -			      enum wmi_vdev_param param_id, u32 param_value) +			      u32 param_id, u32 param_value)  {  	struct wmi_vdev_set_param_cmd *cmd;  	struct sk_buff *skb; +	if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) { +		ath10k_dbg(ATH10K_DBG_WMI, +			   "vdev param %d not supported by firmware\n", +			    param_id); +		return -EOPNOTSUPP; +	} +  	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));  	if (!skb)  		return -ENOMEM; @@ -1804,7 +3203,7 @@ int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,  		   "wmi vdev id 0x%x set param %d value %d\n",  		   vdev_id, param_id, param_value); -	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_SET_PARAM_CMDID); +	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);  }  int ath10k_wmi_vdev_install_key(struct ath10k *ar, @@ -1839,7 +3238,8 @@ int ath10k_wmi_vdev_install_key(struct ath10k *ar,  	ath10k_dbg(ATH10K_DBG_WMI,  		   "wmi vdev install key idx %d cipher %d len %d\n",  		   arg->key_idx, arg->key_cipher, arg->key_len); -	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_INSTALL_KEY_CMDID); +	return ath10k_wmi_cmd_send(ar, skb, +				   ar->wmi.cmd->vdev_install_key_cmdid);  }  int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, @@ -1859,7 +3259,7 @@ int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,  	ath10k_dbg(ATH10K_DBG_WMI,  		   "wmi peer create vdev_id %d peer_addr %pM\n",  		   vdev_id, peer_addr); -	return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_CREATE_CMDID); +	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);  }  int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id, @@ -1879,7 +3279,7 @@ int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,  	ath10k_dbg(ATH10K_DBG_WMI,  		   "wmi peer delete vdev_id %d peer_addr %pM\n",  		   vdev_id, peer_addr); -	return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_DELETE_CMDID); +	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);  }  int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id, @@ -1900,7 +3300,7 @@ int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,  	ath10k_dbg(ATH10K_DBG_WMI,  		   "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",  		   vdev_id, peer_addr, tid_bitmap); -	return ath10k_wmi_cmd_send(ar, 
skb, WMI_PEER_FLUSH_TIDS_CMDID); +	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);  }  int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, @@ -1918,13 +3318,13 @@ int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,  	cmd->vdev_id     = __cpu_to_le32(vdev_id);  	cmd->param_id    = __cpu_to_le32(param_id);  	cmd->param_value = __cpu_to_le32(param_value); -	memcpy(&cmd->peer_macaddr.addr, peer_addr, 6); +	memcpy(&cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);  	ath10k_dbg(ATH10K_DBG_WMI,  		   "wmi vdev %d peer 0x%pM set param %d value %d\n",  		   vdev_id, peer_addr, param_id, param_value); -	return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_SET_PARAM_CMDID); +	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);  }  int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id, @@ -1945,7 +3345,8 @@ int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,  		   "wmi set powersave id 0x%x mode %d\n",  		   vdev_id, psmode); -	return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_MODE_CMDID); +	return ath10k_wmi_cmd_send(ar, skb, +				   ar->wmi.cmd->sta_powersave_mode_cmdid);  }  int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id, @@ -1967,7 +3368,8 @@ int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,  	ath10k_dbg(ATH10K_DBG_WMI,  		   "wmi sta ps param vdev_id 0x%x param %d value %d\n",  		   vdev_id, param_id, value); -	return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_PARAM_CMDID); +	return ath10k_wmi_cmd_send(ar, skb, +				   ar->wmi.cmd->sta_powersave_param_cmdid);  }  int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac, @@ -1993,7 +3395,8 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,  		   "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",  		   vdev_id, param_id, value, mac); -	return ath10k_wmi_cmd_send(ar, skb, WMI_AP_PS_PEER_PARAM_CMDID); +	return ath10k_wmi_cmd_send(ar, skb, +				   ar->wmi.cmd->ap_ps_peer_param_cmdid);  }  int ath10k_wmi_scan_chan_list(struct ath10k *ar, @@ -2031,6 +3434,8 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar,  			flags |= WMI_CHAN_FLAG_ALLOW_VHT;  		if (ch->ht40plus)  			flags |= WMI_CHAN_FLAG_HT40_PLUS; +		if (ch->chan_radar) +			flags |= WMI_CHAN_FLAG_DFS;  		ci->mhz               = __cpu_to_le32(ch->freq);  		ci->band_center_freq1 = __cpu_to_le32(ch->freq); @@ -2039,14 +3444,13 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar,  		ci->max_power         = ch->max_power;  		ci->reg_power         = ch->max_reg_power;  		ci->antenna_max       = ch->max_antenna_gain; -		ci->antenna_max       = 0;  		/* mode & flags share storage */  		ci->mode              = ch->mode;  		ci->flags            |= __cpu_to_le32(flags);  	} -	return ath10k_wmi_cmd_send(ar, skb, WMI_SCAN_CHAN_LIST_CMDID); +	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);  }  int ath10k_wmi_peer_assoc(struct ath10k *ar, @@ -2103,28 +3507,51 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,  		__cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);  	ath10k_dbg(ATH10K_DBG_WMI, -		   "wmi peer assoc vdev %d addr %pM\n", -		   arg->vdev_id, arg->addr); -	return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID); +		   "wmi peer assoc vdev %d addr %pM (%s)\n", +		   arg->vdev_id, arg->addr, +		   arg->peer_reassoc ? 
"reassociate" : "new"); +	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);  } -int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg) +/* This function assumes the beacon is already DMA mapped */ +int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif)  { -	struct wmi_bcn_tx_cmd *cmd; +	struct wmi_bcn_tx_ref_cmd *cmd;  	struct sk_buff *skb; +	struct sk_buff *beacon = arvif->beacon; +	struct ath10k *ar = arvif->ar; +	struct ieee80211_hdr *hdr; +	int ret; +	u16 fc; -	skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->bcn_len); +	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));  	if (!skb)  		return -ENOMEM; -	cmd = (struct wmi_bcn_tx_cmd *)skb->data; -	cmd->hdr.vdev_id  = __cpu_to_le32(arg->vdev_id); -	cmd->hdr.tx_rate  = __cpu_to_le32(arg->tx_rate); -	cmd->hdr.tx_power = __cpu_to_le32(arg->tx_power); -	cmd->hdr.bcn_len  = __cpu_to_le32(arg->bcn_len); -	memcpy(cmd->bcn, arg->bcn, arg->bcn_len); +	hdr = (struct ieee80211_hdr *)beacon->data; +	fc = le16_to_cpu(hdr->frame_control); -	return ath10k_wmi_cmd_send(ar, skb, WMI_BCN_TX_CMDID); +	cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data; +	cmd->vdev_id = __cpu_to_le32(arvif->vdev_id); +	cmd->data_len = __cpu_to_le32(beacon->len); +	cmd->data_ptr = __cpu_to_le32(ATH10K_SKB_CB(beacon)->paddr); +	cmd->msdu_id = 0; +	cmd->frame_control = __cpu_to_le32(fc); +	cmd->flags = 0; + +	if (ATH10K_SKB_CB(beacon)->bcn.dtim_zero) +		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO); + +	if (ATH10K_SKB_CB(beacon)->bcn.deliver_cab) +		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB); + +	ret = ath10k_wmi_cmd_send_nowait(ar, skb, +					 ar->wmi.cmd->pdev_send_bcn_cmdid); + +	if (ret) +		dev_kfree_skb(skb); + +	return ret;  }  static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params, @@ -2155,7 +3582,8 @@ int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,  	ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);  	ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n"); -	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_WMM_PARAMS_CMDID); +	return ath10k_wmi_cmd_send(ar, skb, +				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);  }  int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id) @@ -2171,7 +3599,7 @@ int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)  	cmd->stats_id = __cpu_to_le32(stats_id);  	ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id); -	return ath10k_wmi_cmd_send(ar, skb, WMI_REQUEST_STATS_CMDID); +	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);  }  int ath10k_wmi_force_fw_hang(struct ath10k *ar, @@ -2190,5 +3618,42 @@ int ath10k_wmi_force_fw_hang(struct ath10k *ar,  	ath10k_dbg(ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",  		   type, delay_ms); -	return ath10k_wmi_cmd_send(ar, skb, WMI_FORCE_FW_HANG_CMDID); +	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid); +} + +int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable) +{ +	struct wmi_dbglog_cfg_cmd *cmd; +	struct sk_buff *skb; +	u32 cfg; + +	skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); +	if (!skb) +		return -ENOMEM; + +	cmd = (struct wmi_dbglog_cfg_cmd *)skb->data; + +	if (module_enable) { +		cfg = SM(ATH10K_DBGLOG_LEVEL_VERBOSE, +			 ATH10K_DBGLOG_CFG_LOG_LVL); +	} else { +		/* set back defaults, all modules with WARN level */ +		cfg = SM(ATH10K_DBGLOG_LEVEL_WARN, +			 ATH10K_DBGLOG_CFG_LOG_LVL); +		module_enable = ~0; +	} + +	cmd->module_enable = __cpu_to_le32(module_enable); +	cmd->module_valid = 
__cpu_to_le32(~0); +	cmd->config_enable = __cpu_to_le32(cfg); +	cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK); + +	ath10k_dbg(ATH10K_DBG_WMI, +		   "wmi dbglog cfg modules %08x %08x config %08x %08x\n", +		   __le32_to_cpu(cmd->module_enable), +		   __le32_to_cpu(cmd->module_valid), +		   __le32_to_cpu(cmd->config_enable), +		   __le32_to_cpu(cmd->config_valid)); + +	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);  } diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 2c5a4f8daf2..e93df2c1041 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -198,15 +198,117 @@ struct wmi_mac_addr {  	} __packed;  } __packed; -/* macro to convert MAC address from WMI word format to char array */ -#define WMI_MAC_ADDR_TO_CHAR_ARRAY(pwmi_mac_addr, c_macaddr) do { \ -	(c_macaddr)[0] =  ((pwmi_mac_addr)->word0) & 0xff; \ -	(c_macaddr)[1] = (((pwmi_mac_addr)->word0) >> 8) & 0xff; \ -	(c_macaddr)[2] = (((pwmi_mac_addr)->word0) >> 16) & 0xff; \ -	(c_macaddr)[3] = (((pwmi_mac_addr)->word0) >> 24) & 0xff; \ -	(c_macaddr)[4] =  ((pwmi_mac_addr)->word1) & 0xff; \ -	(c_macaddr)[5] = (((pwmi_mac_addr)->word1) >> 8) & 0xff; \ -	} while (0) +struct wmi_cmd_map { +	u32 init_cmdid; +	u32 start_scan_cmdid; +	u32 stop_scan_cmdid; +	u32 scan_chan_list_cmdid; +	u32 scan_sch_prio_tbl_cmdid; +	u32 pdev_set_regdomain_cmdid; +	u32 pdev_set_channel_cmdid; +	u32 pdev_set_param_cmdid; +	u32 pdev_pktlog_enable_cmdid; +	u32 pdev_pktlog_disable_cmdid; +	u32 pdev_set_wmm_params_cmdid; +	u32 pdev_set_ht_cap_ie_cmdid; +	u32 pdev_set_vht_cap_ie_cmdid; +	u32 pdev_set_dscp_tid_map_cmdid; +	u32 pdev_set_quiet_mode_cmdid; +	u32 pdev_green_ap_ps_enable_cmdid; +	u32 pdev_get_tpc_config_cmdid; +	u32 pdev_set_base_macaddr_cmdid; +	u32 vdev_create_cmdid; +	u32 vdev_delete_cmdid; +	u32 vdev_start_request_cmdid; +	u32 vdev_restart_request_cmdid; +	u32 vdev_up_cmdid; +	u32 vdev_stop_cmdid; +	u32 vdev_down_cmdid; +	u32 vdev_set_param_cmdid; +	u32 vdev_install_key_cmdid; +	u32 peer_create_cmdid; +	u32 peer_delete_cmdid; +	u32 peer_flush_tids_cmdid; +	u32 peer_set_param_cmdid; +	u32 peer_assoc_cmdid; +	u32 peer_add_wds_entry_cmdid; +	u32 peer_remove_wds_entry_cmdid; +	u32 peer_mcast_group_cmdid; +	u32 bcn_tx_cmdid; +	u32 pdev_send_bcn_cmdid; +	u32 bcn_tmpl_cmdid; +	u32 bcn_filter_rx_cmdid; +	u32 prb_req_filter_rx_cmdid; +	u32 mgmt_tx_cmdid; +	u32 prb_tmpl_cmdid; +	u32 addba_clear_resp_cmdid; +	u32 addba_send_cmdid; +	u32 addba_status_cmdid; +	u32 delba_send_cmdid; +	u32 addba_set_resp_cmdid; +	u32 send_singleamsdu_cmdid; +	u32 sta_powersave_mode_cmdid; +	u32 sta_powersave_param_cmdid; +	u32 sta_mimo_ps_mode_cmdid; +	u32 pdev_dfs_enable_cmdid; +	u32 pdev_dfs_disable_cmdid; +	u32 roam_scan_mode; +	u32 roam_scan_rssi_threshold; +	u32 roam_scan_period; +	u32 roam_scan_rssi_change_threshold; +	u32 roam_ap_profile; +	u32 ofl_scan_add_ap_profile; +	u32 ofl_scan_remove_ap_profile; +	u32 ofl_scan_period; +	u32 p2p_dev_set_device_info; +	u32 p2p_dev_set_discoverability; +	u32 p2p_go_set_beacon_ie; +	u32 p2p_go_set_probe_resp_ie; +	u32 p2p_set_vendor_ie_data_cmdid; +	u32 ap_ps_peer_param_cmdid; +	u32 ap_ps_peer_uapsd_coex_cmdid; +	u32 peer_rate_retry_sched_cmdid; +	u32 wlan_profile_trigger_cmdid; +	u32 wlan_profile_set_hist_intvl_cmdid; +	u32 wlan_profile_get_profile_data_cmdid; +	u32 wlan_profile_enable_profile_id_cmdid; +	u32 wlan_profile_list_profile_id_cmdid; +	u32 pdev_suspend_cmdid; +	u32 pdev_resume_cmdid; +	u32 
add_bcn_filter_cmdid; +	u32 rmv_bcn_filter_cmdid; +	u32 wow_add_wake_pattern_cmdid; +	u32 wow_del_wake_pattern_cmdid; +	u32 wow_enable_disable_wake_event_cmdid; +	u32 wow_enable_cmdid; +	u32 wow_hostwakeup_from_sleep_cmdid; +	u32 rtt_measreq_cmdid; +	u32 rtt_tsf_cmdid; +	u32 vdev_spectral_scan_configure_cmdid; +	u32 vdev_spectral_scan_enable_cmdid; +	u32 request_stats_cmdid; +	u32 set_arp_ns_offload_cmdid; +	u32 network_list_offload_config_cmdid; +	u32 gtk_offload_cmdid; +	u32 csa_offload_enable_cmdid; +	u32 csa_offload_chanswitch_cmdid; +	u32 chatter_set_mode_cmdid; +	u32 peer_tid_addba_cmdid; +	u32 peer_tid_delba_cmdid; +	u32 sta_dtim_ps_method_cmdid; +	u32 sta_uapsd_auto_trig_cmdid; +	u32 sta_keepalive_cmd; +	u32 echo_cmdid; +	u32 pdev_utf_cmdid; +	u32 dbglog_cfg_cmdid; +	u32 pdev_qvit_cmdid; +	u32 pdev_ftm_intg_cmdid; +	u32 vdev_set_keepalive_cmdid; +	u32 vdev_get_keepalive_cmdid; +	u32 force_fw_hang_cmdid; +	u32 gpio_config_cmdid; +	u32 gpio_output_cmdid; +};  /*   * wmi command groups. @@ -247,7 +349,9 @@ enum wmi_cmd_group {  #define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1)  #define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1) -/* Command IDs and commande events. */ +#define WMI_CMD_UNSUPPORTED 0 + +/* Command IDs and command events for MAIN FW. */  enum wmi_cmd_id {  	WMI_INIT_CMDID = 0x1, @@ -488,6 +592,217 @@ enum wmi_event_id {  	WMI_GPIO_INPUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GPIO),  }; +/* Command IDs and command events for 10.X firmware */ +enum wmi_10x_cmd_id { +	WMI_10X_START_CMDID = 0x9000, +	WMI_10X_END_CMDID = 0x9FFF, + +	/* initialize the wlan sub system */ +	WMI_10X_INIT_CMDID, + +	/* Scan specific commands */ + +	WMI_10X_START_SCAN_CMDID = WMI_10X_START_CMDID, +	WMI_10X_STOP_SCAN_CMDID, +	WMI_10X_SCAN_CHAN_LIST_CMDID, +	WMI_10X_ECHO_CMDID, + +	/* PDEV(physical device) specific commands */ +	WMI_10X_PDEV_SET_REGDOMAIN_CMDID, +	WMI_10X_PDEV_SET_CHANNEL_CMDID, +	WMI_10X_PDEV_SET_PARAM_CMDID, +	WMI_10X_PDEV_PKTLOG_ENABLE_CMDID, +	WMI_10X_PDEV_PKTLOG_DISABLE_CMDID, +	WMI_10X_PDEV_SET_WMM_PARAMS_CMDID, +	WMI_10X_PDEV_SET_HT_CAP_IE_CMDID, +	WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID, +	WMI_10X_PDEV_SET_BASE_MACADDR_CMDID, +	WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID, +	WMI_10X_PDEV_SET_QUIET_MODE_CMDID, +	WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID, +	WMI_10X_PDEV_GET_TPC_CONFIG_CMDID, + +	/* VDEV(virtual device) specific commands */ +	WMI_10X_VDEV_CREATE_CMDID, +	WMI_10X_VDEV_DELETE_CMDID, +	WMI_10X_VDEV_START_REQUEST_CMDID, +	WMI_10X_VDEV_RESTART_REQUEST_CMDID, +	WMI_10X_VDEV_UP_CMDID, +	WMI_10X_VDEV_STOP_CMDID, +	WMI_10X_VDEV_DOWN_CMDID, +	WMI_10X_VDEV_STANDBY_RESPONSE_CMDID, +	WMI_10X_VDEV_RESUME_RESPONSE_CMDID, +	WMI_10X_VDEV_SET_PARAM_CMDID, +	WMI_10X_VDEV_INSTALL_KEY_CMDID, + +	/* peer specific commands */ +	WMI_10X_PEER_CREATE_CMDID, +	WMI_10X_PEER_DELETE_CMDID, +	WMI_10X_PEER_FLUSH_TIDS_CMDID, +	WMI_10X_PEER_SET_PARAM_CMDID, +	WMI_10X_PEER_ASSOC_CMDID, +	WMI_10X_PEER_ADD_WDS_ENTRY_CMDID, +	WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID, +	WMI_10X_PEER_MCAST_GROUP_CMDID, + +	/* beacon/management specific commands */ + +	WMI_10X_BCN_TX_CMDID, +	WMI_10X_BCN_PRB_TMPL_CMDID, +	WMI_10X_BCN_FILTER_RX_CMDID, +	WMI_10X_PRB_REQ_FILTER_RX_CMDID, +	WMI_10X_MGMT_TX_CMDID, + +	/* commands to directly control ba negotiation directly from host. 
*/ +	WMI_10X_ADDBA_CLEAR_RESP_CMDID, +	WMI_10X_ADDBA_SEND_CMDID, +	WMI_10X_ADDBA_STATUS_CMDID, +	WMI_10X_DELBA_SEND_CMDID, +	WMI_10X_ADDBA_SET_RESP_CMDID, +	WMI_10X_SEND_SINGLEAMSDU_CMDID, + +	/* Station power save specific config */ +	WMI_10X_STA_POWERSAVE_MODE_CMDID, +	WMI_10X_STA_POWERSAVE_PARAM_CMDID, +	WMI_10X_STA_MIMO_PS_MODE_CMDID, + +	/* set debug log config */ +	WMI_10X_DBGLOG_CFG_CMDID, + +	/* DFS-specific commands */ +	WMI_10X_PDEV_DFS_ENABLE_CMDID, +	WMI_10X_PDEV_DFS_DISABLE_CMDID, + +	/* QVIT specific command id */ +	WMI_10X_PDEV_QVIT_CMDID, + +	/* Offload Scan and Roaming related  commands */ +	WMI_10X_ROAM_SCAN_MODE, +	WMI_10X_ROAM_SCAN_RSSI_THRESHOLD, +	WMI_10X_ROAM_SCAN_PERIOD, +	WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, +	WMI_10X_ROAM_AP_PROFILE, +	WMI_10X_OFL_SCAN_ADD_AP_PROFILE, +	WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE, +	WMI_10X_OFL_SCAN_PERIOD, + +	/* P2P specific commands */ +	WMI_10X_P2P_DEV_SET_DEVICE_INFO, +	WMI_10X_P2P_DEV_SET_DISCOVERABILITY, +	WMI_10X_P2P_GO_SET_BEACON_IE, +	WMI_10X_P2P_GO_SET_PROBE_RESP_IE, + +	/* AP power save specific config */ +	WMI_10X_AP_PS_PEER_PARAM_CMDID, +	WMI_10X_AP_PS_PEER_UAPSD_COEX_CMDID, + +	/* Rate-control specific commands */ +	WMI_10X_PEER_RATE_RETRY_SCHED_CMDID, + +	/* WLAN Profiling commands. */ +	WMI_10X_WLAN_PROFILE_TRIGGER_CMDID, +	WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID, +	WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID, +	WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID, +	WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID, + +	/* Suspend resume command Ids */ +	WMI_10X_PDEV_SUSPEND_CMDID, +	WMI_10X_PDEV_RESUME_CMDID, + +	/* Beacon filter commands */ +	WMI_10X_ADD_BCN_FILTER_CMDID, +	WMI_10X_RMV_BCN_FILTER_CMDID, + +	/* WOW Specific WMI commands*/ +	WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID, +	WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID, +	WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID, +	WMI_10X_WOW_ENABLE_CMDID, +	WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID, + +	/* RTT measurement related cmd */ +	WMI_10X_RTT_MEASREQ_CMDID, +	WMI_10X_RTT_TSF_CMDID, + +	/* transmit beacon by value */ +	WMI_10X_PDEV_SEND_BCN_CMDID, + +	/* F/W stats */ +	WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID, +	WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID, +	WMI_10X_REQUEST_STATS_CMDID, + +	/* GPIO Configuration */ +	WMI_10X_GPIO_CONFIG_CMDID, +	WMI_10X_GPIO_OUTPUT_CMDID, + +	WMI_10X_PDEV_UTF_CMDID = WMI_10X_END_CMDID - 1, +}; + +enum wmi_10x_event_id { +	WMI_10X_SERVICE_READY_EVENTID = 0x8000, +	WMI_10X_READY_EVENTID, +	WMI_10X_START_EVENTID = 0x9000, +	WMI_10X_END_EVENTID = 0x9FFF, + +	/* Scan specific events */ +	WMI_10X_SCAN_EVENTID = WMI_10X_START_EVENTID, +	WMI_10X_ECHO_EVENTID, +	WMI_10X_DEBUG_MESG_EVENTID, +	WMI_10X_UPDATE_STATS_EVENTID, + +	/* Instantaneous RSSI event */ +	WMI_10X_INST_RSSI_STATS_EVENTID, + +	/* VDEV specific events */ +	WMI_10X_VDEV_START_RESP_EVENTID, +	WMI_10X_VDEV_STANDBY_REQ_EVENTID, +	WMI_10X_VDEV_RESUME_REQ_EVENTID, +	WMI_10X_VDEV_STOPPED_EVENTID, + +	/* peer  specific events */ +	WMI_10X_PEER_STA_KICKOUT_EVENTID, + +	/* beacon/mgmt specific events */ +	WMI_10X_HOST_SWBA_EVENTID, +	WMI_10X_TBTTOFFSET_UPDATE_EVENTID, +	WMI_10X_MGMT_RX_EVENTID, + +	/* Channel stats event */ +	WMI_10X_CHAN_INFO_EVENTID, + +	/* PHY Error specific WMI event */ +	WMI_10X_PHYERR_EVENTID, + +	/* Roam event to trigger roaming on host */ +	WMI_10X_ROAM_EVENTID, + +	/* matching AP found from list of profiles */ +	WMI_10X_PROFILE_MATCH, + +	/* debug print message used for tracing FW code while debugging */ +	WMI_10X_DEBUG_PRINT_EVENTID, +	/* VI spoecific event */ +	WMI_10X_PDEV_QVIT_EVENTID, 
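The 10.X enums renumber every command and event into the 0x9000 range and order several of them differently from the MAIN firmware, so the MAIN enum values cannot be sent to a 10.X target. This is what the wmi_cmd_map introduced earlier is for: presumably one map instance per firmware flavor is filled in wmi.c, and every sender reads ar->wmi.cmd-><name>_cmdid. A minimal sketch of the remapping idea; only the numeric values visible in the enums above are real, the rest are placeholders:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

struct cmd_map {
	uint32_t init_cmdid;
	uint32_t start_scan_cmdid;
};

static const struct cmd_map map_main = {
	.init_cmdid = 0x1,          /* WMI_INIT_CMDID */
	.start_scan_cmdid = 0x2,    /* placeholder; real value not shown here */
};

static const struct cmd_map map_10x = {
	.init_cmdid = 0xA000,       /* WMI_10X_INIT_CMDID: follows WMI_10X_END_CMDID (0x9FFF) */
	.start_scan_cmdid = 0x9000, /* WMI_10X_START_SCAN_CMDID */
};

static const struct cmd_map *pick_cmd_map(bool fw_is_10x)
{
	/* mirrors the test_bit(ATH10K_FW_FEATURE_WMI_10X, ...) checks in wmi.c */
	return fw_is_10x ? &map_10x : &map_main;
}

static int send_cmd(uint32_t cmd_id)
{
	/* commands a firmware lacks are mapped to 0 (WMI_CMD_UNSUPPORTED)
	 * and must be rejected before hitting the wire, as the pdev/vdev
	 * set_param senders above do */
	if (cmd_id == 0)
		return -EOPNOTSUPP;
	return 0; /* would build an skb and transmit here */
}

int main(void)
{
	return send_cmd(pick_cmd_map(true)->start_scan_cmdid);
}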
+	/* FW code profile data in response to profile request */
+	WMI_10X_WLAN_PROFILE_DATA_EVENTID,
+
+	/* RTT related event ID */
+	WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID,
+	WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID,
+	WMI_10X_RTT_ERROR_REPORT_EVENTID,
+
+	WMI_10X_WOW_WAKEUP_HOST_EVENTID,
+	WMI_10X_DCS_INTERFERENCE_EVENTID,
+
+	/* TPC config for the current operating channel */
+	WMI_10X_PDEV_TPC_CONFIG_EVENTID,
+
+	WMI_10X_GPIO_INPUT_EVENTID,
+	WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID-1,
+};
+
 enum wmi_phy_mode {
 	MODE_11A        = 0,   /* 11a Mode */
 	MODE_11G        = 1,   /* 11b/g Mode */
@@ -508,6 +823,48 @@ enum wmi_phy_mode {
 	MODE_MAX        = 14
 };
 
+static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode)
+{
+	switch (mode) {
+	case MODE_11A:
+		return "11a";
+	case MODE_11G:
+		return "11g";
+	case MODE_11B:
+		return "11b";
+	case MODE_11GONLY:
+		return "11gonly";
+	case MODE_11NA_HT20:
+		return "11na-ht20";
+	case MODE_11NG_HT20:
+		return "11ng-ht20";
+	case MODE_11NA_HT40:
+		return "11na-ht40";
+	case MODE_11NG_HT40:
+		return "11ng-ht40";
+	case MODE_11AC_VHT20:
+		return "11ac-vht20";
+	case MODE_11AC_VHT40:
+		return "11ac-vht40";
+	case MODE_11AC_VHT80:
+		return "11ac-vht80";
+	case MODE_11AC_VHT20_2G:
+		return "11ac-vht20-2g";
+	case MODE_11AC_VHT40_2G:
+		return "11ac-vht40-2g";
+	case MODE_11AC_VHT80_2G:
+		return "11ac-vht80-2g";
+	case MODE_UNKNOWN:
+		/* skip */
+		break;
+
+		/* no default handler to allow compiler to check that the
+		 * enum is fully handled */
+	};
+
+	return "<unknown>";
+}
+
 #define WMI_CHAN_LIST_TAG	0x1
 #define WMI_SSID_LIST_TAG	0x2
 #define WMI_BSSID_LIST_TAG	0x3
@@ -526,6 +883,7 @@ struct wmi_channel {
 	union {
 		__le32 reginfo0;
 		struct {
+			/* note: power unit is 0.5 dBm */
 			u8 min_power;
 			u8 max_power;
 			u8 reg_power;
@@ -548,7 +906,8 @@ struct wmi_channel_arg {
 	bool allow_ht;
 	bool allow_vht;
 	bool ht40plus;
-	/* note: power unit is 1/4th of dBm */
+	bool chan_radar;
+	/* note: power unit is 0.5 dBm */
 	u32 min_power;
 	u32 max_power;
 	u32 max_reg_power;
@@ -763,13 +1122,45 @@ struct wmi_service_ready_event {
 	struct wlan_host_mem_req mem_reqs[1];
 } __packed;
 
-/*
- * status consists of  upper 16 bits fo int status and lower 16 bits of
- * module ID that retuned status
- */
-#define WLAN_INIT_STATUS_SUCCESS   0x0
-#define WLAN_GET_INIT_STATUS_REASON(status)    ((status) & 0xffff)
-#define WLAN_GET_INIT_STATUS_MODULE_ID(status) (((status) >> 16) & 0xffff)
+/* This is the definition from the 10.X firmware branch */
+struct wmi_service_ready_event_10x {
+	__le32 sw_version;
+	__le32 abi_version;
+
+	/* WMI_PHY_CAPABILITY */
+	__le32 phy_capability;
+
+	/* Maximum number of frag table entries that SW will populate less 1 */
+	__le32 max_frag_entry;
+	__le32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
+	__le32 num_rf_chains;
+
+	/*
+	 * The following field is only valid for service type
+	 * WMI_SERVICE_11AC
+	 */
+	__le32 ht_cap_info; /* WMI HT Capability */
+	__le32 vht_cap_info; /* VHT capability info field of 802.11ac */
+	__le32 vht_supp_mcs; /* VHT Supported MCS Set field Rx/Tx same */
+	__le32 hw_min_tx_power;
+	__le32 hw_max_tx_power;
+
+	struct hal_reg_capabilities hal_reg_capabilities;
+
+	__le32 sys_cap_info;
+	__le32 min_pkt_size_enable; /* Enterprise mode short pkt enable */
+
+	/*
+	 * request to host to allocate a chunk of memory and pass it down to FW
+	 * via WMI_INIT. FW uses this as FW extension memory for saving its
+	 * data structures.
Only valid for low latency interfaces like PCIE +	 * where FW can access this memory directly (or) by DMA. +	 */ +	__le32 num_mem_reqs; + +	struct wlan_host_mem_req mem_reqs[1]; +} __packed; +  #define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)  #define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ) @@ -864,7 +1255,7 @@ struct wmi_resource_config {  	 */  	__le32 rx_decap_mode; -	/* what is the maximum scan requests than can be queued */ +	/* what is the maximum number of scan requests that can be queued */  	__le32 scan_max_pending_reqs;  	/* maximum VDEV that could use BMISS offload */ @@ -978,6 +1369,192 @@ struct wmi_resource_config {  	__le32 max_frag_entries;  } __packed; +struct wmi_resource_config_10x { +	/* number of virtual devices (VAPs) to support */ +	__le32 num_vdevs; + +	/* number of peer nodes to support */ +	__le32 num_peers; + +	/* number of keys per peer */ +	__le32 num_peer_keys; + +	/* total number of TX/RX data TIDs */ +	__le32 num_tids; + +	/* +	 * max skid for resolving hash collisions +	 * +	 *   The address search table is sparse, so that if two MAC addresses +	 *   result in the same hash value, the second of these conflicting +	 *   entries can slide to the next index in the address search table, +	 *   and use it, if it is unoccupied.  This ast_skid_limit parameter +	 *   specifies the upper bound on how many subsequent indices to search +	 *   over to find an unoccupied space. +	 */ +	__le32 ast_skid_limit; + +	/* +	 * the nominal chain mask for transmit +	 * +	 *   The chain mask may be modified dynamically, e.g. to operate AP +	 *   tx with a reduced number of chains if no clients are associated. +	 *   This configuration parameter specifies the nominal chain-mask that +	 *   should be used when not operating with a reduced set of tx chains. +	 */ +	__le32 tx_chain_mask; + +	/* +	 * the nominal chain mask for receive +	 * +	 *   The chain mask may be modified dynamically, e.g. for a client +	 *   to use a reduced number of chains for receive if the traffic to +	 *   the client is low enough that it doesn't require downlink MIMO +	 *   or antenna diversity. +	 *   This configuration parameter specifies the nominal chain-mask that +	 *   should be used when not operating with a reduced set of rx chains. +	 */ +	__le32 rx_chain_mask; + +	/* +	 * what rx reorder timeout (ms) to use for the AC +	 * +	 *   Each WMM access class (voice, video, best-effort, background) will +	 *   have its own timeout value to dictate how long to wait for missing +	 *   rx MPDUs to arrive before flushing subsequent MPDUs that have +	 *   already been received. +	 *   This parameter specifies the timeout in milliseconds for each +	 *   class. 
+	 */ +	__le32 rx_timeout_pri_vi; +	__le32 rx_timeout_pri_vo; +	__le32 rx_timeout_pri_be; +	__le32 rx_timeout_pri_bk; + +	/* +	 * what mode the rx should decap packets to +	 * +	 *   MAC can decap to RAW (no decap), native wifi or Ethernet types +	 *   THis setting also determines the default TX behavior, however TX +	 *   behavior can be modified on a per VAP basis during VAP init +	 */ +	__le32 rx_decap_mode; + +	/* what is the maximum number of scan requests that can be queued */ +	__le32 scan_max_pending_reqs; + +	/* maximum VDEV that could use BMISS offload */ +	__le32 bmiss_offload_max_vdev; + +	/* maximum VDEV that could use offload roaming */ +	__le32 roam_offload_max_vdev; + +	/* maximum AP profiles that would push to offload roaming */ +	__le32 roam_offload_max_ap_profiles; + +	/* +	 * how many groups to use for mcast->ucast conversion +	 * +	 *   The target's WAL maintains a table to hold information regarding +	 *   which peers belong to a given multicast group, so that if +	 *   multicast->unicast conversion is enabled, the target can convert +	 *   multicast tx frames to a series of unicast tx frames, to each +	 *   peer within the multicast group. +	     This num_mcast_groups configuration parameter tells the target how +	 *   many multicast groups to provide storage for within its multicast +	 *   group membership table. +	 */ +	__le32 num_mcast_groups; + +	/* +	 * size to alloc for the mcast membership table +	 * +	 *   This num_mcast_table_elems configuration parameter tells the +	 *   target how many peer elements it needs to provide storage for in +	 *   its multicast group membership table. +	 *   These multicast group membership table elements are shared by the +	 *   multicast groups stored within the table. +	 */ +	__le32 num_mcast_table_elems; + +	/* +	 * whether/how to do multicast->unicast conversion +	 * +	 *   This configuration parameter specifies whether the target should +	 *   perform multicast --> unicast conversion on transmit, and if so, +	 *   what to do if it finds no entries in its multicast group +	 *   membership table for the multicast IP address in the tx frame. +	 *   Configuration value: +	 *   0 -> Do not perform multicast to unicast conversion. +	 *   1 -> Convert multicast frames to unicast, if the IP multicast +	 *        address from the tx frame is found in the multicast group +	 *        membership table.  If the IP multicast address is not found, +	 *        drop the frame. +	 *   2 -> Convert multicast frames to unicast, if the IP multicast +	 *        address from the tx frame is found in the multicast group +	 *        membership table.  If the IP multicast address is not found, +	 *        transmit the frame as multicast. +	 */ +	__le32 mcast2ucast_mode; + +	/* +	 * how much memory to allocate for a tx PPDU dbg log +	 * +	 *   This parameter controls how much memory the target will allocate +	 *   to store a log of tx PPDU meta-information (how large the PPDU +	 *   was, when it was sent, whether it was successful, etc.) +	 */ +	__le32 tx_dbg_log_size; + +	/* how many AST entries to be allocated for WDS */ +	__le32 num_wds_entries; + +	/* +	 * MAC DMA burst size, e.g., For target PCI limit can be +	 * 0 -default, 1 256B +	 */ +	__le32 dma_burst_size; + +	/* +	 * Fixed delimiters to be inserted after every MPDU to +	 * account for interface latency to avoid underrun. +	 */ +	__le32 mac_aggr_delim; + +	/* +	 *   determine whether target is responsible for detecting duplicate +	 *   non-aggregate MPDU and timing out stale fragments. 
+	 *
+	 *   A-MPDU reordering is always performed on the target.
+	 *
+	 *   0: target responsible for frag timeout and dup checking
+	 *   1: host responsible for frag timeout and dup checking
+	 */
+	__le32 rx_skip_defrag_timeout_dup_detection_check;
+
+	/*
+	 * Configuration for VoW:
+	 * number of Video Nodes to be supported
+	 * and max number of descriptors for each Video link (node).
+	 */
+	__le32 vow_config;
+
+	/* Number of msdu descriptors target should use */
+	__le32 num_msdu_desc;
+
+	/*
+	 * Max. number of Tx fragments per MSDU
+	 *  This parameter controls the max number of Tx fragments per MSDU.
+	 *  This is sent by the target as part of the WMI_SERVICE_READY event
+	 *  and is overridden by the OS shim as required.
+	 */
+	__le32 max_frag_entries;
+} __packed;
+
+
+#define NUM_UNITS_IS_NUM_VDEVS   0x1
+#define NUM_UNITS_IS_NUM_PEERS   0x2
+
 /* structure describing host memory chunk. */
 struct host_memory_chunk {
 	/* id of the request that is passed up in service ready */
@@ -999,6 +1576,18 @@ struct wmi_init_cmd {
 	struct host_memory_chunk host_mem_chunks[1];
 } __packed;
 
+/* _10x structure is from the 10.X FW API */
+struct wmi_init_cmd_10x {
+	struct wmi_resource_config_10x resource_config;
+	__le32 num_host_mem_chunks;
+
+	/*
+	 * variable number of host memory chunks.
+	 * This should be the last element in the structure
+	 */
+	struct host_memory_chunk host_mem_chunks[1];
+} __packed;
+
 /* TLV for channel list */
 struct wmi_chan_list {
 	__le32 tag; /* WMI_CHAN_LIST_TAG */
@@ -1118,6 +1707,88 @@ struct wmi_start_scan_cmd {
 	 */
 } __packed;
 
+/* This is the definition from the 10.X firmware branch */
+struct wmi_start_scan_cmd_10x {
+	/* Scan ID */
+	__le32 scan_id;
+
+	/* Scan requestor ID */
+	__le32 scan_req_id;
+
+	/* VDEV id (interface) that is requesting scan */
+	__le32 vdev_id;
+
+	/* Scan Priority, input to scan scheduler */
+	__le32 scan_priority;
+
+	/* Scan events subscription */
+	__le32 notify_scan_events;
+
+	/* dwell time in msec on active channels */
+	__le32 dwell_time_active;
+
+	/* dwell time in msec on passive channels */
+	__le32 dwell_time_passive;
+
+	/*
+	 * min time in msec on the BSS channel, only valid if at least one
+	 * VDEV is active
+	 */
+	__le32 min_rest_time;
+
+	/*
+	 * max rest time in msec on the BSS channel, only valid if at least
+	 * one VDEV is active
+	 */
+	/*
+	 * The scanner will rest on the bss channel at least min_rest_time.
+	 * After min_rest_time the scanner will start checking for tx/rx
+	 * activity on all VDEVs. If there is no activity the scanner will
+	 * switch to off channel. If there is activity the scanner will let
+	 * the radio on the bss channel until max_rest_time expires. At
+	 * max_rest_time the scanner will switch to off channel irrespective
+	 * of activity. Activity is determined by the idle_time parameter.
+	 */
+	__le32 max_rest_time;
+
+	/*
+	 * time before sending next set of probe requests.
+	 * The scanner keeps repeating probe requests transmission with
+	 * period specified by repeat_probe_time.
+	 * The number of probe requests specified depends on the ssid_list
+	 * and bssid_list
+	 */
+	__le32 repeat_probe_time;
+
+	/* time in msec between 2 consecutive probe requests within a set. */
+	__le32 probe_spacing_time;
+
+	/*
+	 * data inactivity time in msec on bss channel that will be used by
+	 * scanner for measuring the inactivity.
+	 */
+	__le32 idle_time;
+
+	/* maximum time in msec allowed for scan */
+	__le32 max_scan_time;
+
+	/*
+	 * delay in msec before sending first probe request after switching
+	 * to a channel
+	 */
+	__le32 probe_delay;
+
+	/* Scan control flags */
+	__le32 scan_ctrl_flags;
+
+	/*
+	 * TLV (tag length value) parameters follow the scan_cmd structure.
+	 * TLVs can contain a channel list, bssid list, ssid list and IEs.
+	 * The TLV tags are defined above.
+	 */
+} __packed;
+
+
 struct wmi_ssid_arg {
 	int len;
 	const u8 *ssid;
@@ -1268,7 +1939,7 @@ struct wmi_scan_event {
  * good idea to pass all the fields in the RX status
  * descriptor up to the host.
  */
-struct wmi_mgmt_rx_hdr {
+struct wmi_mgmt_rx_hdr_v1 {
 	__le32 channel;
 	__le32 snr;
 	__le32 rate;
@@ -1277,8 +1948,18 @@
 	__le32 status; /* %WMI_RX_STATUS_ */
 } __packed;
 
-struct wmi_mgmt_rx_event {
-	struct wmi_mgmt_rx_hdr hdr;
+struct wmi_mgmt_rx_hdr_v2 {
+	struct wmi_mgmt_rx_hdr_v1 v1;
+	__le32 rssi_ctl[4];
+} __packed;
+
+struct wmi_mgmt_rx_event_v1 {
+	struct wmi_mgmt_rx_hdr_v1 hdr;
+	u8 buf[0];
+} __packed;
+
+struct wmi_mgmt_rx_event_v2 {
+	struct wmi_mgmt_rx_hdr_v2 hdr;
 	u8 buf[0];
 } __packed;
@@ -1288,6 +1969,10 @@ struct wmi_mgmt_rx_event {
 #define WMI_RX_STATUS_ERR_MIC			0x10
 #define WMI_RX_STATUS_ERR_KEY_CACHE_MISS	0x20
 
+#define PHY_ERROR_SPECTRAL_SCAN			0x26
+#define PHY_ERROR_FALSE_RADAR_EXT		0x24
+#define PHY_ERROR_RADAR				0x05
+
 struct wmi_single_phyerr_rx_hdr {
 	/* TSF timestamp */
 	__le32 tsf_timestamp;
@@ -1379,6 +2064,87 @@ struct wmi_comb_phyerr_rx_event {
 	u8 bufp[0];
 } __packed;
 
+#define PHYERR_TLV_SIG				0xBB
+#define PHYERR_TLV_TAG_SEARCH_FFT_REPORT	0xFB
+#define PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY	0xF8
+
+struct phyerr_radar_report {
+	__le32 reg0; /* RADAR_REPORT_REG0_* */
+	__le32 reg1; /* RADAR_REPORT_REG1_* */
+} __packed;
+
+#define RADAR_REPORT_REG0_PULSE_IS_CHIRP_MASK		0x80000000
+#define RADAR_REPORT_REG0_PULSE_IS_CHIRP_LSB		31
+
+#define RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH_MASK	0x40000000
+#define RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH_LSB	30
+
+#define RADAR_REPORT_REG0_AGC_TOTAL_GAIN_MASK		0x3FF00000
+#define RADAR_REPORT_REG0_AGC_TOTAL_GAIN_LSB		20
+
+#define RADAR_REPORT_REG0_PULSE_DELTA_DIFF_MASK		0x000F0000
+#define RADAR_REPORT_REG0_PULSE_DELTA_DIFF_LSB		16
+
+#define RADAR_REPORT_REG0_PULSE_DELTA_PEAK_MASK		0x0000FC00
+#define RADAR_REPORT_REG0_PULSE_DELTA_PEAK_LSB		10
+
+#define RADAR_REPORT_REG0_PULSE_SIDX_MASK		0x000003FF
+#define RADAR_REPORT_REG0_PULSE_SIDX_LSB		0
+
+#define RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID_MASK	0x80000000
+#define RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID_LSB	31
+
+#define RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN_MASK	0x7F000000
+#define RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN_LSB		24
+
+#define RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK_MASK	0x00FF0000
+#define RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK_LSB	16
+
+#define RADAR_REPORT_REG1_PULSE_TSF_OFFSET_MASK		0x0000FF00
+#define RADAR_REPORT_REG1_PULSE_TSF_OFFSET_LSB		8
+
+#define RADAR_REPORT_REG1_PULSE_DUR_MASK		0x000000FF
+#define RADAR_REPORT_REG1_PULSE_DUR_LSB			0
+
+struct phyerr_fft_report {
+	__le32 reg0; /* SEARCH_FFT_REPORT_REG0_* */
+	__le32 reg1; /* SEARCH_FFT_REPORT_REG1_* */
+} __packed;
+
+#define SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB_MASK	0xFF800000
+#define SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB_LSB	23
+
+#define SEARCH_FFT_REPORT_REG0_BASE_PWR_DB_MASK		0x007FC000
+#define SEARCH_FFT_REPORT_REG0_BASE_PWR_DB_LSB		14
+
+#define SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX_MASK		0x00003000
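Each report word here is documented as a MASK/LSB pair, meant to be consumed with shift-and-mask helpers; the SM() packing macro already appears in the dbglog code earlier in this patch, and an MS()-style extractor is its natural counterpart (assumed here, shown standalone for illustration):

#include <stdint.h>
#include <stdio.h>

#define MS(val, field) (((val) & field##_MASK) >> field##_LSB)

/* two of the fields defined above */
#define RADAR_REPORT_REG0_PULSE_IS_CHIRP_MASK	0x80000000
#define RADAR_REPORT_REG0_PULSE_IS_CHIRP_LSB	31
#define RADAR_REPORT_REG0_PULSE_SIDX_MASK	0x000003FF
#define RADAR_REPORT_REG0_PULSE_SIDX_LSB	0

int main(void)
{
	/* a host-endian copy of phyerr_radar_report.reg0, i.e. the value
	 * after __le32_to_cpu() */
	uint32_t reg0 = 0x80000123;

	printf("chirp=%u sidx=%u\n",
	       (unsigned int)MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP),
	       (unsigned int)MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX));
	return 0;
}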
+#define SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX_LSB		12 + +#define SEARCH_FFT_REPORT_REG0_PEAK_SIDX_MASK		0x00000FFF +#define SEARCH_FFT_REPORT_REG0_PEAK_SIDX_LSB		0 + +#define SEARCH_FFT_REPORT_REG1_RELPWR_DB_MASK		0xFC000000 +#define SEARCH_FFT_REPORT_REG1_RELPWR_DB_LSB		26 + +#define SEARCH_FFT_REPORT_REG1_AVGPWR_DB_MASK		0x03FC0000 +#define SEARCH_FFT_REPORT_REG1_AVGPWR_DB_LSB		18 + +#define SEARCH_FFT_REPORT_REG1_PEAK_MAG_MASK		0x0003FF00 +#define SEARCH_FFT_REPORT_REG1_PEAK_MAG_LSB		8 + +#define SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB_MASK	0x000000FF +#define SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB_LSB	0 + + +struct phyerr_tlv { +	__le16 len; +	u8 tag; +	u8 sig; +} __packed; + +#define DFS_RSSI_POSSIBLY_FALSE			50 +#define DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE	40 +  struct wmi_mgmt_tx_hdr {  	__le32 vdev_id;  	struct wmi_mac_addr peer_macaddr; @@ -1409,6 +2175,31 @@ struct wmi_pdev_set_regdomain_cmd {  	__le32 conformance_test_limit_5G;  } __packed; +enum wmi_dfs_region { +	/* Uninitialized dfs domain */ +	WMI_UNINIT_DFS_DOMAIN = 0, + +	/* FCC3 dfs domain */ +	WMI_FCC_DFS_DOMAIN = 1, + +	/* ETSI dfs domain */ +	WMI_ETSI_DFS_DOMAIN = 2, + +	/*Japan dfs domain */ +	WMI_MKK4_DFS_DOMAIN = 3, +}; + +struct wmi_pdev_set_regdomain_cmd_10x { +	__le32 reg_domain; +	__le32 reg_domain_2G; +	__le32 reg_domain_5G; +	__le32 conformance_test_limit_2G; +	__le32 conformance_test_limit_5G; + +	/* dfs domain from wmi_dfs_region */ +	__le32 dfs_domain; +} __packed; +  /* Command to set/unset chip in quiet mode */  struct wmi_pdev_set_quiet_cmd {  	/* period in TUs */ @@ -1434,6 +2225,19 @@ enum ath10k_protmode {  	ATH10K_PROT_RTSCTS   = 2,    /* RTS-CTS */  }; +enum wmi_rtscts_profile { +	WMI_RTSCTS_FOR_NO_RATESERIES = 0, +	WMI_RTSCTS_FOR_SECOND_RATESERIES, +	WMI_RTSCTS_ACROSS_SW_RETRIES +}; + +#define WMI_RTSCTS_ENABLED		1 +#define WMI_RTSCTS_SET_MASK		0x0f +#define WMI_RTSCTS_SET_LSB		0 + +#define WMI_RTSCTS_PROFILE_MASK		0xf0 +#define WMI_RTSCTS_PROFILE_LSB		4 +  enum wmi_beacon_gen_mode {  	WMI_BEACON_STAGGERED_MODE = 0,  	WMI_BEACON_BURST_MODE = 1 @@ -1465,10 +2269,63 @@ struct wmi_csa_event {  #define VDEV_DEFAULT_STATS_UPDATE_PERIOD    500  #define PEER_DEFAULT_STATS_UPDATE_PERIOD    500 +struct wmi_pdev_param_map { +	u32 tx_chain_mask; +	u32 rx_chain_mask; +	u32 txpower_limit2g; +	u32 txpower_limit5g; +	u32 txpower_scale; +	u32 beacon_gen_mode; +	u32 beacon_tx_mode; +	u32 resmgr_offchan_mode; +	u32 protection_mode; +	u32 dynamic_bw; +	u32 non_agg_sw_retry_th; +	u32 agg_sw_retry_th; +	u32 sta_kickout_th; +	u32 ac_aggrsize_scaling; +	u32 ltr_enable; +	u32 ltr_ac_latency_be; +	u32 ltr_ac_latency_bk; +	u32 ltr_ac_latency_vi; +	u32 ltr_ac_latency_vo; +	u32 ltr_ac_latency_timeout; +	u32 ltr_sleep_override; +	u32 ltr_rx_override; +	u32 ltr_tx_activity_timeout; +	u32 l1ss_enable; +	u32 dsleep_enable; +	u32 pcielp_txbuf_flush; +	u32 pcielp_txbuf_watermark; +	u32 pcielp_txbuf_tmo_en; +	u32 pcielp_txbuf_tmo_value; +	u32 pdev_stats_update_period; +	u32 vdev_stats_update_period; +	u32 peer_stats_update_period; +	u32 bcnflt_stats_update_period; +	u32 pmf_qos; +	u32 arp_ac_override; +	u32 dcs; +	u32 ani_enable; +	u32 ani_poll_period; +	u32 ani_listen_period; +	u32 ani_ofdm_level; +	u32 ani_cck_level; +	u32 dyntxchain; +	u32 proxy_sta; +	u32 idle_ps_config; +	u32 power_gating_sleep; +	u32 fast_channel_reset; +	u32 burst_dur; +	u32 burst_enable; +}; + +#define WMI_PDEV_PARAM_UNSUPPORTED 0 +  enum wmi_pdev_param { -	/* TX chian mask */ +	/* TX chain mask */  	WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1, -	/* RX chian mask */ +	/* 
RX chain mask */
 	WMI_PDEV_PARAM_RX_CHAIN_MASK,
 	/* TX power limit for 2G Radio */
 	WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
@@ -1490,7 +2347,12 @@ enum wmi_pdev_param {
 	 * 0: no protection 1: use CTS-to-self 2: use RTS/CTS
 	 */
 	WMI_PDEV_PARAM_PROTECTION_MODE,
-	/* Dynamic bandwidth 0: disable 1: enable */
+	/*
+	 * Dynamic bandwidth - 0: disable, 1: enable
+	 *
+	 * When enabled HW rate control tries different bandwidths when
+	 * retransmitting frames.
+	 */
 	WMI_PDEV_PARAM_DYNAMIC_BW,
 	/* Non aggregate/ 11g sw retry threshold. 0 - disable */
 	WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
@@ -1564,6 +2426,97 @@ enum wmi_pdev_param {
 	WMI_PDEV_PARAM_POWER_GATING_SLEEP,
 };
 
+enum wmi_10x_pdev_param {
+	/* TX chain mask */
+	WMI_10X_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+	/* RX chain mask */
+	WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
+	/* TX power limit for 2G Radio */
+	WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
+	/* TX power limit for 5G Radio */
+	WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
+	/* TX power scale */
+	WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
+	/* Beacon generation mode. 0: host, 1: target */
+	WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
+	/* Beacon generation mode. 0: staggered, 1: bursted */
+	WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
+	/*
+	 * Resource manager off chan mode.
+	 * 0: turn off offchan mode. 1: turn on offchan mode
+	 */
+	WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+	/*
+	 * Protection mode:
+	 * 0: no protection 1: use CTS-to-self 2: use RTS/CTS
+	 */
+	WMI_10X_PDEV_PARAM_PROTECTION_MODE,
+	/* Dynamic bandwidth 0: disable 1: enable */
+	WMI_10X_PDEV_PARAM_DYNAMIC_BW,
+	/* Non aggregate/ 11g sw retry threshold. 0 - disable */
+	WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+	/* aggregate sw retry threshold. 0 - disable */
+	WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
+	/* Station kickout threshold (number of consecutive failures). 0 - disable */
+	WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
+	/* Aggregate size scaling configuration per AC */
+	WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+	/* LTR enable */
+	WMI_10X_PDEV_PARAM_LTR_ENABLE,
+	/* LTR latency for BE, in us */
+	WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
+	/* LTR latency for BK, in us */
+	WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
+	/* LTR latency for VI, in us */
+	WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
+	/* LTR latency for VO, in us */
+	WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
+	/* LTR AC latency timeout, in ms */
+	WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+	/* LTR platform latency override, in us */
+	WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+	/* LTR-RX override, in us */
+	WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
+	/* Tx activity timeout for LTR, in us */
+	WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+	/* L1SS state machine enable */
+	WMI_10X_PDEV_PARAM_L1SS_ENABLE,
+	/* Deep sleep state machine enable */
+	WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
+	/* pdev level stats update period in ms */
+	WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+	/* vdev level stats update period in ms */
+	WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+	/* peer level stats update period in ms */
+	WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+	/* beacon filter status update period */
+	WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+	/* QOS Mgmt frame protection MFP/PMF 0: disable, 1: enable */
+	WMI_10X_PDEV_PARAM_PMF_QOS,
+	/* Access category on which ARP and DHCP frames are sent */
+	WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
+	/* DCS configuration */
+	WMI_10X_PDEV_PARAM_DCS,
+	/* Enable/Disable ANI on target */
+	WMI_10X_PDEV_PARAM_ANI_ENABLE,
+	/* configure the ANI polling period */
+	WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
+	/*
configure the ANI listening period */ +	WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD, +	/* configure OFDM immunity level */ +	WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL, +	/* configure CCK immunity level */ +	WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL, +	/* Enable/Disable CDD for 1x1 STAs in rate control module */ +	WMI_10X_PDEV_PARAM_DYNTXCHAIN, +	/* Enable/Disable Fast channel reset*/ +	WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET, +	/* Set Bursting DUR */ +	WMI_10X_PDEV_PARAM_BURST_DUR, +	/* Set Bursting Enable*/ +	WMI_10X_PDEV_PARAM_BURST_ENABLE, +}; +  struct wmi_pdev_set_param_cmd {  	__le32 param_id;  	__le32 param_value; @@ -1757,6 +2710,9 @@ struct wal_dbg_tx_stats {  	/* wal pdev resets  */  	__le32 pdev_resets; +	/* frames dropped due to non-availability of stateless TIDs */ +	__le32 stateless_tid_alloc_failure; +  	__le32 phy_underrun;  	/* MPDU is more than txop limit */ @@ -1813,13 +2769,21 @@ enum wmi_stats_id {  	WMI_REQUEST_AP_STAT	= 0x02  }; +struct wlan_inst_rssi_args { +	__le16 cfg_retry_count; +	__le16 retry_count; +}; +  struct wmi_request_stats_cmd {  	__le32 stats_id; -	/* -	 * Space to add parameters like -	 * peer mac addr -	 */ +	__le32 vdev_id; + +	/* peer MAC address */ +	struct wmi_mac_addr peer_macaddr; + +	/* Instantaneous RSSI arguments */ +	struct wlan_inst_rssi_args inst_rssi_args;  } __packed;  /* Suspend option */ @@ -1870,7 +2834,7 @@ struct wmi_stats_event {   * PDEV statistics   * TODO: add all PDEV stats here   */ -struct wmi_pdev_stats { +struct wmi_pdev_stats_old {  	__le32 chan_nf;        /* Channel noise floor */  	__le32 tx_frame_count; /* TX frame count */  	__le32 rx_frame_count; /* RX frame count */ @@ -1881,6 +2845,23 @@ struct wmi_pdev_stats {  	struct wal_dbg_stats wal; /* WAL dbg stats */  } __packed; +struct wmi_pdev_stats_10x { +	__le32 chan_nf;        /* Channel noise floor */ +	__le32 tx_frame_count; /* TX frame count */ +	__le32 rx_frame_count; /* RX frame count */ +	__le32 rx_clear_count; /* rx clear count */ +	__le32 cycle_count;    /* cycle count */ +	__le32 phy_err_count;  /* Phy error count */ +	__le32 chan_tx_pwr;    /* channel tx power */ +	struct wal_dbg_stats wal; /* WAL dbg stats */ +	__le32 ack_rx_bad; +	__le32 rts_bad; +	__le32 rts_good; +	__le32 fcs_bad; +	__le32 no_beacons; +	__le32 mib_int_count; +} __packed; +  /*   * VDEV statistics   * TODO: add all VDEV stats here @@ -1893,10 +2874,17 @@ struct wmi_vdev_stats {   * peer statistics.   
* TODO: add more stats   */ -struct wmi_peer_stats { +struct wmi_peer_stats_old { +	struct wmi_mac_addr peer_macaddr; +	__le32 peer_rssi; +	__le32 peer_tx_rate; +} __packed; + +struct wmi_peer_stats_10x {  	struct wmi_mac_addr peer_macaddr;  	__le32 peer_rssi;  	__le32 peer_tx_rate; +	__le32 peer_rx_rate;  } __packed;  struct wmi_vdev_create_cmd { @@ -2077,6 +3065,18 @@ struct wmi_vdev_install_key_arg {  	const void *key_data;  }; +/* + * vdev fixed rate format: + * - preamble - b7:b6 - see WMI_RATE_PREMABLE_ + * - nss      - b5:b4 - ss number (0 mean 1ss) + * - rate_mcs - b3:b0 - as below + *    CCK:  0 - 11Mbps, 1 - 5,5Mbps, 2 - 2Mbps, 3 - 1Mbps, + *          4 - 11Mbps (s), 5 - 5,5Mbps (s), 6 - 2Mbps (s) + *    OFDM: 0 - 48Mbps, 1 - 24Mbps, 2 - 12Mbps, 3 - 6Mbps, + *          4 - 54Mbps, 5 - 36Mbps, 6 - 18Mbps, 7 - 9Mbps + *    HT/VHT: MCS index + */ +  /* Preamble types to be used with VDEV fixed rate configuration */  enum wmi_rate_preamble {  	WMI_RATE_PREAMBLE_OFDM, @@ -2088,6 +3088,61 @@ enum wmi_rate_preamble {  /* Value to disable fixed rate setting */  #define WMI_FIXED_RATE_NONE    (0xff) +struct wmi_vdev_param_map { +	u32 rts_threshold; +	u32 fragmentation_threshold; +	u32 beacon_interval; +	u32 listen_interval; +	u32 multicast_rate; +	u32 mgmt_tx_rate; +	u32 slot_time; +	u32 preamble; +	u32 swba_time; +	u32 wmi_vdev_stats_update_period; +	u32 wmi_vdev_pwrsave_ageout_time; +	u32 wmi_vdev_host_swba_interval; +	u32 dtim_period; +	u32 wmi_vdev_oc_scheduler_air_time_limit; +	u32 wds; +	u32 atim_window; +	u32 bmiss_count_max; +	u32 bmiss_first_bcnt; +	u32 bmiss_final_bcnt; +	u32 feature_wmm; +	u32 chwidth; +	u32 chextoffset; +	u32 disable_htprotection; +	u32 sta_quickkickout; +	u32 mgmt_rate; +	u32 protection_mode; +	u32 fixed_rate; +	u32 sgi; +	u32 ldpc; +	u32 tx_stbc; +	u32 rx_stbc; +	u32 intra_bss_fwd; +	u32 def_keyid; +	u32 nss; +	u32 bcast_data_rate; +	u32 mcast_data_rate; +	u32 mcast_indicate; +	u32 dhcp_indicate; +	u32 unknown_dest_indicate; +	u32 ap_keepalive_min_idle_inactive_time_secs; +	u32 ap_keepalive_max_idle_inactive_time_secs; +	u32 ap_keepalive_max_unresponsive_time_secs; +	u32 ap_enable_nawds; +	u32 mcast2ucast_set; +	u32 enable_rtscts; +	u32 txbf; +	u32 packet_powersave; +	u32 drop_unencry; +	u32 tx_encap_type; +	u32 ap_detect_out_of_sync_sleeping_sta_time_secs; +}; + +#define WMI_VDEV_PARAM_UNSUPPORTED 0 +  /* the definition of different VDEV parameters */  enum wmi_vdev_param {  	/* RTS Threshold */ @@ -2219,6 +3274,121 @@ enum wmi_vdev_param {  	WMI_VDEV_PARAM_TX_ENCAP_TYPE,  }; +/* the definition of different VDEV parameters */ +enum wmi_10x_vdev_param { +	/* RTS Threshold */ +	WMI_10X_VDEV_PARAM_RTS_THRESHOLD = 0x1, +	/* Fragmentation threshold */ +	WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD, +	/* beacon interval in TUs */ +	WMI_10X_VDEV_PARAM_BEACON_INTERVAL, +	/* Listen interval in TUs */ +	WMI_10X_VDEV_PARAM_LISTEN_INTERVAL, +	/* muticast rate in Mbps */ +	WMI_10X_VDEV_PARAM_MULTICAST_RATE, +	/* management frame rate in Mbps */ +	WMI_10X_VDEV_PARAM_MGMT_TX_RATE, +	/* slot time (long vs short) */ +	WMI_10X_VDEV_PARAM_SLOT_TIME, +	/* preamble (long vs short) */ +	WMI_10X_VDEV_PARAM_PREAMBLE, +	/* SWBA time (time before tbtt in msec) */ +	WMI_10X_VDEV_PARAM_SWBA_TIME, +	/* time period for updating VDEV stats */ +	WMI_10X_VDEV_STATS_UPDATE_PERIOD, +	/* age out time in msec for frames queued for station in power save */ +	WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME, +	/* +	 * Host SWBA interval (time in msec before tbtt for SWBA event +	 * generation). 
+	 */
+	WMI_10X_VDEV_HOST_SWBA_INTERVAL,
+	/* DTIM period (specified in units of num beacon intervals) */
+	WMI_10X_VDEV_PARAM_DTIM_PERIOD,
+	/*
+	 * scheduler air time limit for this VDEV. Used by off chan
+	 * scheduler.
+	 */
+	WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+	/* enable/disable WDS for this VDEV */
+	WMI_10X_VDEV_PARAM_WDS,
+	/* ATIM Window */
+	WMI_10X_VDEV_PARAM_ATIM_WINDOW,
+	/* BMISS max */
+	WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
+	/* WMM enable/disable */
+	WMI_10X_VDEV_PARAM_FEATURE_WMM,
+	/* Channel width */
+	WMI_10X_VDEV_PARAM_CHWIDTH,
+	/* Channel Offset */
+	WMI_10X_VDEV_PARAM_CHEXTOFFSET,
+	/* Disable HT Protection */
+	WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
+	/* Quick STA Kickout */
+	WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
+	/* Rate to be used with Management frames */
+	WMI_10X_VDEV_PARAM_MGMT_RATE,
+	/* Protection Mode */
+	WMI_10X_VDEV_PARAM_PROTECTION_MODE,
+	/* Fixed rate setting */
+	WMI_10X_VDEV_PARAM_FIXED_RATE,
+	/* Short GI Enable/Disable */
+	WMI_10X_VDEV_PARAM_SGI,
+	/* Enable LDPC */
+	WMI_10X_VDEV_PARAM_LDPC,
+	/* Enable Tx STBC */
+	WMI_10X_VDEV_PARAM_TX_STBC,
+	/* Enable Rx STBC */
+	WMI_10X_VDEV_PARAM_RX_STBC,
+	/* Intra BSS forwarding */
+	WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
+	/* Setting Default xmit key for Vdev */
+	WMI_10X_VDEV_PARAM_DEF_KEYID,
+	/* NSS width */
+	WMI_10X_VDEV_PARAM_NSS,
+	/* Set the custom rate for the broadcast data frames */
+	WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
+	/* Set the custom rate (rate-code) for multicast data frames */
+	WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
+	/* Tx multicast packet indicate Enable/Disable */
+	WMI_10X_VDEV_PARAM_MCAST_INDICATE,
+	/* Tx DHCP packet indicate Enable/Disable */
+	WMI_10X_VDEV_PARAM_DHCP_INDICATE,
+	/* Enable host inspection of Tx unicast packet to unknown destination */
+	WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+
+	/* The minimum amount of time AP begins to consider STA inactive */
+	WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+
+	/*
+	 * An associated STA is considered inactive when there is no recent
+	 * TX/RX activity and no downlink frames are buffered for it. Once a
+	 * STA exceeds the maximum idle inactive time, the AP will send an
+	 * 802.11 data-null as a keep alive to verify the STA is still
+	 * associated. If the STA does not ACK the data-null, or if the
+	 * data-null is buffered and the STA does not retrieve it, the STA
+	 * will be considered unresponsive
+	 * (see WMI_10X_VDEV_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS).
+	 */
+	WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+
+	/*
+	 * An associated STA is considered unresponsive if there is no recent
+	 * TX/RX activity and downlink frames are buffered for it. Once a STA
+	 * exceeds the maximum unresponsive time, the AP will send a
+	 * WMI_10X_STA_KICKOUT event to the host so the STA can be deleted.
+	 */
+	WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+
+	/* Enable NAWDS: MCAST INSPECT Enable, NAWDS Flag set */
+	WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
+
+	WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
+	/* Enable/Disable RTS-CTS */
+	WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
+
+	WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+};
+
 /* slot time long */
 #define WMI_VDEV_SLOT_TIME_LONG		0x1
 /* slot time short */
@@ -2295,6 +3465,24 @@ struct wmi_bcn_tx_arg {
 	const void *bcn;
 };
 
+enum wmi_bcn_tx_ref_flags {
+	WMI_BCN_TX_REF_FLAG_DTIM_ZERO = 0x1,
+	WMI_BCN_TX_REF_FLAG_DELIVER_CAB = 0x2,
+};
+
+struct wmi_bcn_tx_ref_cmd {
+	__le32 vdev_id;
+	__le32 data_len;
+	/* physical address of the frame - dma pointer */
+	__le32 data_ptr;
+	/* id for host to track */
+	__le32 msdu_id;
+	/* frame ctrl to set up the PPDU desc */
+	__le32 frame_control;
+	/* to control CABQ traffic: WMI_BCN_TX_REF_FLAG_ */
+	__le32 flags;
+} __packed;
+
 /* Beacon filter */
 #define WMI_BCN_FILTER_ALL   0 /* Filter all beacons */
 #define WMI_BCN_FILTER_NONE  1 /* Pass all beacons */
@@ -2751,6 +3939,12 @@ enum wmi_peer_smps_state {
 	WMI_PEER_SMPS_DYNAMIC = 0x2
 };
 
+enum wmi_peer_chwidth {
+	WMI_PEER_CHWIDTH_20MHZ = 0,
+	WMI_PEER_CHWIDTH_40MHZ = 1,
+	WMI_PEER_CHWIDTH_80MHZ = 2,
+};
+
 enum wmi_peer_param {
 	WMI_PEER_SMPS_STATE = 0x1, /* see %wmi_peer_smps_state */
 	WMI_PEER_AMPDU      = 0x2,
@@ -2931,6 +4125,10 @@ struct wmi_chan_info_event {
 	__le32 cycle_count;
 } __packed;
 
+struct wmi_peer_sta_kickout_event {
+	struct wmi_mac_addr peer_macaddr;
+} __packed;
+
 #define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0)
 
 /* FIXME: empirically extrapolated */
@@ -2994,13 +4192,60 @@ struct wmi_force_fw_hang_cmd {
 	__le32 delay_ms;
 } __packed;
 
+enum ath10k_dbglog_level {
+	ATH10K_DBGLOG_LEVEL_VERBOSE = 0,
+	ATH10K_DBGLOG_LEVEL_INFO = 1,
+	ATH10K_DBGLOG_LEVEL_WARN = 2,
+	ATH10K_DBGLOG_LEVEL_ERR = 3,
+};
+
+/* VAP ids to enable dbglog */
+#define ATH10K_DBGLOG_CFG_VAP_LOG_LSB		0
+#define ATH10K_DBGLOG_CFG_VAP_LOG_MASK		0x0000ffff
+
+/* to enable dbglog in the firmware */
+#define ATH10K_DBGLOG_CFG_REPORTING_ENABLE_LSB	16
+#define ATH10K_DBGLOG_CFG_REPORTING_ENABLE_MASK	0x00010000
+
+/* timestamp resolution */
+#define ATH10K_DBGLOG_CFG_RESOLUTION_LSB	17
+#define ATH10K_DBGLOG_CFG_RESOLUTION_MASK	0x000E0000
+
+/* number of queued messages before sending them to the host */
+#define ATH10K_DBGLOG_CFG_REPORT_SIZE_LSB	20
+#define ATH10K_DBGLOG_CFG_REPORT_SIZE_MASK	0x0ff00000
+
+/*
+ * Log levels to enable. This defines the minimum level to enable; it is
+ * not a bitmask. See enum ath10k_dbglog_level for the values.
+ */
+#define ATH10K_DBGLOG_CFG_LOG_LVL_LSB		28
+#define ATH10K_DBGLOG_CFG_LOG_LVL_MASK		0x70000000
+
+/*
+ * Note: this is a cleaned up version of a struct the firmware uses.
+ * For example, config_valid was hidden inside an array.
+ */
+struct wmi_dbglog_cfg_cmd {
+	/* bitmask to hold mod id config */
+	__le32 module_enable;
+
+	/* see ATH10K_DBGLOG_CFG_ */
+	__le32 config_enable;
+
+	/* mask of module id bits to be changed */
+	__le32 module_valid;
+
+	/* mask of config bits to be changed, see ATH10K_DBGLOG_CFG_ */
+	__le32 config_valid;
+} __packed;
+
 #define ATH10K_RTS_MAX		2347
 #define ATH10K_FRAGMT_THRESHOLD_MIN	540
 #define ATH10K_FRAGMT_THRESHOLD_MAX	2346
 
 #define WMI_MAX_EVENT 0x1000
 /* Maximum number of pending TXed WMI packets */
-#define WMI_MAX_PENDING_TX_COUNT 128
 #define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr)
 
 /* By default disable power save for IBSS */
@@ -3013,17 +4258,16 @@ int ath10k_wmi_attach(struct ath10k *ar);
 void ath10k_wmi_detach(struct ath10k *ar);
 int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
 int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
-void ath10k_wmi_flush_tx(struct ath10k *ar);
 
-int ath10k_wmi_connect_htc_service(struct ath10k *ar);
+int ath10k_wmi_connect(struct ath10k *ar);
 int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
				const struct wmi_channel_arg *);
-int ath10k_wmi_pdev_suspend_target(struct ath10k *ar);
+int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt);
 int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
 int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
-				  u16 rd5g, u16 ctl2g, u16 ctl5g);
-int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
-			      u32 value);
+				  u16 rd5g, u16 ctl2g, u16 ctl5g,
+				  enum wmi_dfs_region dfs_reg);
+int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value);
 int ath10k_wmi_cmd_init(struct ath10k *ar);
 int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *);
 void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *);
@@ -3043,7 +4287,7 @@ int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
		       const u8 *bssid);
 int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id);
 int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
-			      enum wmi_vdev_param param_id, u32 param_value);
+			      u32 param_id, u32 param_value);
 int ath10k_wmi_vdev_install_key(struct ath10k *ar,
				const struct wmi_vdev_install_key_arg *arg);
 int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
@@ -3066,11 +4310,13 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			       enum wmi_ap_ps_peer_param param_id, u32 value);
 int ath10k_wmi_scan_chan_list(struct ath10k *ar,
			      const struct wmi_scan_chan_list_arg *arg);
-int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg);
+int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif);
 int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
			const struct wmi_pdev_set_wmm_params_arg *arg);
 int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
 int ath10k_wmi_force_fw_hang(struct ath10k *ar,
			     enum wmi_force_fw_hang_type type, u32 delay_ms);
+int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable);
 
 #endif /* _WMI_H_ */
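For reference, the ATH10K_DBGLOG_CFG_* LSB/MASK pairs added by this patch compose a single 32-bit config word. A minimal sketch of how such a word could be assembled (hypothetical helper, not part of the patch; the exported ath10k_wmi_dbglog_cfg() above takes only a module_enable bitmask, so this only illustrates the bit layout):

/*
 * Hypothetical helper showing how the ATH10K_DBGLOG_CFG_* fields pack
 * into the config_enable word of struct wmi_dbglog_cfg_cmd.
 * Timestamp resolution (b19:b17) is left at 0 here.
 */
static u32 ath10k_dbglog_build_config(u16 vap_ids,
				      enum ath10k_dbglog_level level,
				      u8 report_size)
{
	u32 cfg = 0;

	/* b15:b0 - bitmap of VAP ids to log */
	cfg |= ((u32)vap_ids << ATH10K_DBGLOG_CFG_VAP_LOG_LSB) &
	       ATH10K_DBGLOG_CFG_VAP_LOG_MASK;

	/* b16 - turn on reporting to the host */
	cfg |= (1 << ATH10K_DBGLOG_CFG_REPORTING_ENABLE_LSB) &
	       ATH10K_DBGLOG_CFG_REPORTING_ENABLE_MASK;

	/* b27:b20 - messages queued in firmware before a report is sent */
	cfg |= ((u32)report_size << ATH10K_DBGLOG_CFG_REPORT_SIZE_LSB) &
	       ATH10K_DBGLOG_CFG_REPORT_SIZE_MASK;

	/* b30:b28 - minimum log level to report; a level, not a bitmask */
	cfg |= ((u32)level << ATH10K_DBGLOG_CFG_LOG_LVL_LSB) &
	       ATH10K_DBGLOG_CFG_LOG_LVL_MASK;

	return cfg;
}

The resulting word would go into the config_enable field of struct wmi_dbglog_cfg_cmd, with the same bits set in config_valid so the firmware knows which settings are being changed.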

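Similarly, the "vdev fixed rate format" comment earlier in this header describes a packed byte: preamble in b7:b6, spatial-stream count in b5:b4, and the rate/MCS index in b3:b0. A hypothetical packing helper under those assumptions (only WMI_RATE_PREAMBLE_OFDM is visible in this hunk; other preamble values are assumed to follow it in the enum):

/*
 * Hypothetical helper, not part of the patch: packs a value in the
 * vdev fixed rate format documented above.
 */
static u32 ath10k_pack_fixed_rate(enum wmi_rate_preamble preamble,
				  u8 nss, u8 rate_mcs)
{
	return ((u32)(preamble & 0x3) << 6) | /* b7:b6 - preamble type */
	       ((u32)(nss & 0x3) << 4) |      /* b5:b4 - ss number, 0 means 1ss */
	       (rate_mcs & 0xf);              /* b3:b0 - CCK/OFDM index or MCS */
}

The packed value would be handed to ath10k_wmi_vdev_set_param() with the firmware's fixed-rate parameter id; WMI_FIXED_RATE_NONE (0xff) disables the fixed rate again.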