Diffstat (limited to 'drivers/net/wireless/iwlwifi/pcie/trans.c')
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 688
1 file changed, 547 insertions(+), 141 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index bad95d28d50..788085bc65d 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -5,7 +5,7 @@   *   * GPL LICENSE SUMMARY   * - * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved. + * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.   *   * This program is free software; you can redistribute it and/or modify   * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@   *   * BSD LICENSE   * - * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.   * All rights reserved.   *   * Redistribution and use in source and binary forms, with or without @@ -73,33 +73,21 @@  #include "iwl-csr.h"  #include "iwl-prph.h"  #include "iwl-agn-hw.h" +#include "iwl-fw-error-dump.h"  #include "internal.h" -static void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, -						  u32 reg, u32 mask, u32 value) +static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)  { -	u32 v; - -#ifdef CONFIG_IWLWIFI_DEBUG -	WARN_ON_ONCE(value & ~mask); -#endif - -	v = iwl_read32(trans, reg); -	v &= ~mask; -	v |= value; -	iwl_write32(trans, reg, v); +	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG, +		    ((reg & 0x0000ffff) | (2 << 28))); +	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);  } -static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans, -					      u32 reg, u32 mask) +static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)  { -	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0); -} - -static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans, -					    u32 reg, u32 mask) -{ -	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask); +	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val); +	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG, +		    ((reg & 0x0000ffff) | (3 << 28)));  }  static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux) @@ -150,7 +138,6 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans)   */  static int iwl_pcie_apm_init(struct iwl_trans *trans)  { -	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	int ret = 0;  	IWL_DEBUG_INFO(trans, "Init card's basic functions\n"); @@ -160,8 +147,9 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)  	 */  	/* Disable L0S exit timer (platform NMI Work/Around) */ -	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, -		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); +	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) +		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, +			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);  	/*  	 * Disable L0s without affecting L1; @@ -206,26 +194,165 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)  		goto out;  	} +	if (trans->cfg->host_interrupt_operation_mode) { +		/* +		 * This is a bit of an abuse - This is needed for 7260 / 3160 +		 * only check host_interrupt_operation_mode even if this is +		 * not related to host_interrupt_operation_mode. +		 * +		 * Enable the oscillator to count wake up time for L1 exit. This +		 * consumes slightly more power (100uA) - but allows to be sure +		 * that we wake up from L1 on time. +		 * +		 * This looks weird: read twice the same register, discard the +		 * value, set a bit, and yet again, read that same register +		 * just to discard the value. 
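A note on the two helpers introduced at the top of this hunk: iwl_trans_pcie_read_shr() and iwl_trans_pcie_write_shr() implement indirect access to the shared (SHR) register block through a control/data register pair, with bits 31:28 of the control word carrying the opcode (2 for read, 3 for write) and the low 16 bits the target address. A read-modify-write built on them would look like this (minimal sketch; shr_set_bits() is a hypothetical wrapper, not part of the patch):

    static void shr_set_bits(struct iwl_trans *trans, u32 reg, u32 mask)
    {
            /* latch reg plus the "read" opcode, fetch via the DATA register */
            u32 val = iwl_trans_pcie_read_shr(trans, reg);

            /* push the value back with the requested bits set */
            iwl_trans_pcie_write_shr(trans, reg, val | mask);
    }

iwl_pcie_apm_lp_xtal_enable() below does exactly this dance by hand for SHR_APMG_XTAL_CFG_REG and SHR_APMG_GP1_REG.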
But that's the way the hardware +		 * seems to like it. +		 */ +		iwl_read_prph(trans, OSC_CLK); +		iwl_read_prph(trans, OSC_CLK); +		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL); +		iwl_read_prph(trans, OSC_CLK); +		iwl_read_prph(trans, OSC_CLK); +	} +  	/*  	 * Enable DMA clock and wait for it to stabilize.  	 * -	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits -	 * do not disable clocks.  This preserves any hardware bits already -	 * set by default in "CLK_CTRL_REG" after reset. +	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" +	 * bits do not disable clocks.  This preserves any hardware +	 * bits already set by default in "CLK_CTRL_REG" after reset.  	 */ -	iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT); -	udelay(20); +	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) { +		iwl_write_prph(trans, APMG_CLK_EN_REG, +			       APMG_CLK_VAL_DMA_CLK_RQT); +		udelay(20); + +		/* Disable L1-Active */ +		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, +				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS); -	/* Disable L1-Active */ -	iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, -			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS); +		/* Clear the interrupt in APMG if the NIC is in RFKILL */ +		iwl_write_prph(trans, APMG_RTC_INT_STT_REG, +			       APMG_RTC_INT_STT_RFKILL); +	} -	set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status); +	set_bit(STATUS_DEVICE_ENABLED, &trans->status);  out:  	return ret;  } +/* + * Enable LP XTAL to avoid HW bug where device may consume much power if + * FW is not loaded after device reset. LP XTAL is disabled by default + * after device HW reset. Do it only if XTAL is fed by internal source. + * Configure device's "persistence" mode to avoid resetting XTAL again when + * SHRD_HW_RST occurs in S3. + */ +static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans) +{ +	int ret; +	u32 apmg_gp1_reg; +	u32 apmg_xtal_cfg_reg; +	u32 dl_cfg_reg; + +	/* Force XTAL ON */ +	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, +				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON); + +	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */ +	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); + +	udelay(10); + +	/* +	 * Set "initialization complete" bit to move adapter from +	 * D0U* --> D0A* (powered-up active) state. +	 */ +	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + +	/* +	 * Wait for clock stabilization; once stabilized, access to +	 * device-internal resources is possible. +	 */ +	ret = iwl_poll_bit(trans, CSR_GP_CNTRL, +			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, +			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, +			   25000); +	if (WARN_ON(ret < 0)) { +		IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n"); +		/* Release XTAL ON request */ +		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, +					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON); +		return; +	} + +	/* +	 * Clear "disable persistence" to avoid LP XTAL resetting when +	 * SHRD_HW_RST is applied in S3. +	 */ +	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, +				    APMG_PCIDEV_STT_VAL_PERSIST_DIS); + +	/* +	 * Force APMG XTAL to be active to prevent its disabling by HW +	 * caused by APMG idle state. +	 */ +	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans, +						    SHR_APMG_XTAL_CFG_REG); +	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG, +				 apmg_xtal_cfg_reg | +				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ); + +	/* +	 * Reset entire device again - do controller reset (results in +	 * SHRD_HW_RST). Turn MAC off before proceeding. 
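Both iwl_pcie_apm_init() above and this LP XTAL flow wait on hardware with iwl_poll_bit(). Its semantics, restated as a self-contained sketch (the 10 us polling step is an assumption; the real helper lives elsewhere in the driver):

    /* sketch only: spin until (reg & mask) == bits, returning elapsed
     * microseconds, or -ETIMEDOUT once timeout_us has passed */
    static int poll_bit_sketch(struct iwl_trans *trans, u32 addr,
                               u32 bits, u32 mask, int timeout_us)
    {
            int t = 0;

            do {
                    if ((iwl_read32(trans, addr) & mask) == bits)
                            return t;
                    udelay(10);
                    t += 10;
            } while (t < timeout_us);

            return -ETIMEDOUT;
    }

The 25000 passed when waiting for CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY is therefore a 25 ms budget.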
+	 */ +	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); + +	udelay(10); + +	/* Enable LP XTAL by indirect access through CSR */ +	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG); +	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg | +				 SHR_APMG_GP1_WF_XTAL_LP_EN | +				 SHR_APMG_GP1_CHICKEN_BIT_SELECT); + +	/* Clear delay line clock power up */ +	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG); +	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg & +				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP); + +	/* +	 * Enable persistence mode to avoid LP XTAL resetting when +	 * SHRD_HW_RST is applied in S3. +	 */ +	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, +		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE); + +	/* +	 * Clear "initialization complete" bit to move adapter from +	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state. +	 */ +	iwl_clear_bit(trans, CSR_GP_CNTRL, +		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + +	/* Activates XTAL resources monitor */ +	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG, +				 CSR_MONITOR_XTAL_RESOURCES); + +	/* Release XTAL ON request */ +	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, +				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON); +	udelay(10); + +	/* Release APMG XTAL */ +	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG, +				 apmg_xtal_cfg_reg & +				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ); +} +  static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)  {  	int ret = 0; @@ -246,14 +373,18 @@ static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)  static void iwl_pcie_apm_stop(struct iwl_trans *trans)  { -	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n"); -	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status); +	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);  	/* Stop device's DMA activity */  	iwl_pcie_apm_stop_master(trans); +	if (trans->cfg->lp_xtal_workaround) { +		iwl_pcie_apm_lp_xtal_enable(trans); +		return; +	} +  	/* Reset the entire device */  	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); @@ -270,18 +401,15 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans)  static int iwl_pcie_nic_init(struct iwl_trans *trans)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); -	unsigned long flags;  	/* nic_init */ -	spin_lock_irqsave(&trans_pcie->irq_lock, flags); +	spin_lock(&trans_pcie->irq_lock);  	iwl_pcie_apm_init(trans); -	/* Set interrupt coalescing calibration timer to default (512 usecs) */ -	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF); - -	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); +	spin_unlock(&trans_pcie->irq_lock); -	iwl_pcie_set_pwr(trans, false); +	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) +		iwl_pcie_set_pwr(trans, false);  	iwl_op_mode_nic_config(trans->op_mode); @@ -326,6 +454,7 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)  {  	int ret;  	int t = 0; +	int iter;  	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n"); @@ -334,18 +463,23 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)  	if (ret >= 0)  		return 0; -	/* If HW is not ready, prepare the conditions to check again */ -	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, -		    CSR_HW_IF_CONFIG_REG_PREPARE); +	for (iter = 0; iter < 10; iter++) { +		/* If HW is not ready, prepare the conditions to check again */ +		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, +			    CSR_HW_IF_CONFIG_REG_PREPARE); -	do { -		ret = 
iwl_pcie_set_hw_ready(trans); -		if (ret >= 0) -			return 0; +		do { +			ret = iwl_pcie_set_hw_ready(trans); +			if (ret >= 0) +				return 0; -		usleep_range(200, 1000); -		t += 200; -	} while (t < 150000); +			usleep_range(200, 1000); +			t += 200; +		} while (t < 150000); +		msleep(25); +	} + +	IWL_DEBUG_INFO(trans, "got NIC after %d iterations\n", iter);  	return ret;  } @@ -443,22 +577,181 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,  	return ret;  } -static int iwl_pcie_load_given_ucode(struct iwl_trans *trans, -				const struct fw_img *image) +static int iwl_pcie_load_cpu_secured_sections(struct iwl_trans *trans, +					      const struct fw_img *image, +					      int cpu, +					      int *first_ucode_section)  { +	int shift_param;  	int i, ret = 0; +	u32 last_read_idx = 0; + +	if (cpu == 1) { +		shift_param = 0; +		*first_ucode_section = 0; +	} else { +		shift_param = 16; +		(*first_ucode_section)++; +	} -	for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) { -		if (!image->sec[i].data) +	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) { +		last_read_idx = i; + +		if (!image->sec[i].data || +		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) { +			IWL_DEBUG_FW(trans, +				     "Break since Data not valid or Empty section, sec = %d\n", +				     i);  			break; +		} + +		if (i == (*first_ucode_section) + 1) +			/* set CPU to started */ +			iwl_set_bits_prph(trans, +					  CSR_UCODE_LOAD_STATUS_ADDR, +					  LMPM_CPU_HDRS_LOADING_COMPLETED +					  << shift_param);  		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);  		if (ret)  			return ret;  	} +	/* image loading complete */ +	iwl_set_bits_prph(trans, +			  CSR_UCODE_LOAD_STATUS_ADDR, +			  LMPM_CPU_UCODE_LOADING_COMPLETED << shift_param); -	/* Remove all resets to allow NIC to operate */ -	iwl_write32(trans, CSR_RESET, 0); +	*first_ucode_section = last_read_idx; + +	return 0; +} + +static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans, +				      const struct fw_img *image, +				      int cpu, +				      int *first_ucode_section) +{ +	int shift_param; +	int i, ret = 0; +	u32 last_read_idx = 0; + +	if (cpu == 1) { +		shift_param = 0; +		*first_ucode_section = 0; +	} else { +		shift_param = 16; +		(*first_ucode_section)++; +	} + +	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) { +		last_read_idx = i; + +		if (!image->sec[i].data || +		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) { +			IWL_DEBUG_FW(trans, +				     "Break since Data not valid or Empty section, sec = %d\n", +				     i); +			break; +		} + +		ret = iwl_pcie_load_section(trans, i, &image->sec[i]); +		if (ret) +			return ret; +	} + +	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) +		iwl_set_bits_prph(trans, +				  CSR_UCODE_LOAD_STATUS_ADDR, +				  (LMPM_CPU_UCODE_LOADING_COMPLETED | +				   LMPM_CPU_HDRS_LOADING_COMPLETED | +				   LMPM_CPU_UCODE_LOADING_STARTED) << +					shift_param); + +	*first_ucode_section = last_read_idx; + +	return 0; +} + +static int iwl_pcie_load_given_ucode(struct iwl_trans *trans, +				const struct fw_img *image) +{ +	int ret = 0; +	int first_ucode_section; + +	IWL_DEBUG_FW(trans, +		     "working with %s image\n", +		     image->is_secure ? "Secured" : "Non Secured"); +	IWL_DEBUG_FW(trans, +		     "working with %s CPU\n", +		     image->is_dual_cpus ? 
"Dual" : "Single"); + +	/* configure the ucode to be ready to get the secured image */ +	if (image->is_secure) { +		/* set secure boot inspector addresses */ +		iwl_write_prph(trans, +			       LMPM_SECURE_INSPECTOR_CODE_ADDR, +			       LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE); + +		iwl_write_prph(trans, +			       LMPM_SECURE_INSPECTOR_DATA_ADDR, +			       LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE); + +		/* set CPU1 header address */ +		iwl_write_prph(trans, +			       LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR, +			       LMPM_SECURE_CPU1_HDR_MEM_SPACE); + +		/* load to FW the binary Secured sections of CPU1 */ +		ret = iwl_pcie_load_cpu_secured_sections(trans, image, 1, +							 &first_ucode_section); +		if (ret) +			return ret; + +	} else { +		/* load to FW the binary Non secured sections of CPU1 */ +		ret = iwl_pcie_load_cpu_sections(trans, image, 1, +						 &first_ucode_section); +		if (ret) +			return ret; +	} + +	if (image->is_dual_cpus) { +		/* set CPU2 header address */ +		iwl_write_prph(trans, +			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR, +			       LMPM_SECURE_CPU2_HDR_MEM_SPACE); + +		/* load to FW the binary sections of CPU2 */ +		if (image->is_secure) +			ret = iwl_pcie_load_cpu_secured_sections( +							trans, image, 2, +							&first_ucode_section); +		else +			ret = iwl_pcie_load_cpu_sections(trans, image, 2, +							 &first_ucode_section); +		if (ret) +			return ret; +	} + +	/* release CPU reset */ +	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) +		iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT); +	else +		iwl_write32(trans, CSR_RESET, 0); + +	if (image->is_secure) { +		/* wait for image verification to complete  */ +		ret = iwl_poll_prph_bit(trans, +					LMPM_SECURE_BOOT_CPU1_STATUS_ADDR, +					LMPM_SECURE_BOOT_STATUS_SUCCESS, +					LMPM_SECURE_BOOT_STATUS_SUCCESS, +					LMPM_SECURE_TIME_OUT); + +		if (ret < 0) { +			IWL_ERR(trans, "Time out on secure boot process\n"); +			return ret; +		} +	}  	return 0;  } @@ -466,7 +759,6 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,  static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,  				   const struct fw_img *fw, bool run_in_rfkill)  { -	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	int ret;  	bool hw_rfkill; @@ -476,17 +768,15 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,  		return -EIO;  	} -	clear_bit(STATUS_FW_ERROR, &trans_pcie->status); -  	iwl_enable_rfkill_int(trans);  	/* If platform's RF_KILL switch is NOT set to KILL */  	hw_rfkill = iwl_is_rfkill_set(trans);  	if (hw_rfkill) -		set_bit(STATUS_RFKILL, &trans_pcie->status); +		set_bit(STATUS_RFKILL, &trans->status);  	else -		clear_bit(STATUS_RFKILL, &trans_pcie->status); -	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); +		clear_bit(STATUS_RFKILL, &trans->status); +	iwl_trans_pcie_rf_kill(trans, hw_rfkill);  	if (hw_rfkill && !run_in_rfkill)  		return -ERFKILL; @@ -524,12 +814,14 @@ static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)  static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); -	unsigned long flags; +	bool hw_rfkill, was_hw_rfkill; + +	was_hw_rfkill = iwl_is_rfkill_set(trans);  	/* tell the device to stop sending interrupts */ -	spin_lock_irqsave(&trans_pcie->irq_lock, flags); +	spin_lock(&trans_pcie->irq_lock);  	iwl_disable_interrupts(trans); -	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); +	spin_unlock(&trans_pcie->irq_lock);  	/* device going down, 
Stop using ICT table */  	iwl_pcie_disable_ict(trans); @@ -541,7 +833,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)  	 * restart. So don't process again if the device is  	 * already dead.  	 */ -	if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) { +	if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) {  		iwl_pcie_tx_stop(trans);  		iwl_pcie_rx_stop(trans); @@ -561,21 +853,51 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)  	/* Upon stop, the APM issues an interrupt if HW RF kill is set.  	 * Clean again the interrupt here  	 */ -	spin_lock_irqsave(&trans_pcie->irq_lock, flags); +	spin_lock(&trans_pcie->irq_lock);  	iwl_disable_interrupts(trans); -	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); - -	iwl_enable_rfkill_int(trans); +	spin_unlock(&trans_pcie->irq_lock);  	/* stop and reset the on-board processor */  	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);  	/* clear all status bits */ -	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); -	clear_bit(STATUS_INT_ENABLED, &trans_pcie->status); -	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status); -	clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status); -	clear_bit(STATUS_RFKILL, &trans_pcie->status); +	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); +	clear_bit(STATUS_INT_ENABLED, &trans->status); +	clear_bit(STATUS_DEVICE_ENABLED, &trans->status); +	clear_bit(STATUS_TPOWER_PMI, &trans->status); +	clear_bit(STATUS_RFKILL, &trans->status); + +	/* +	 * Even if we stop the HW, we still want the RF kill +	 * interrupt +	 */ +	iwl_enable_rfkill_int(trans); + +	/* +	 * Check again since the RF kill state may have changed while +	 * all the interrupts were disabled, in this case we couldn't +	 * receive the RF kill interrupt and update the state in the +	 * op_mode. +	 * Don't call the op_mode if the rkfill state hasn't changed. +	 * This allows the op_mode to call stop_device from the rfkill +	 * notification without endless recursion. Under very rare +	 * circumstances, we might have a small recursion if the rfkill +	 * state changed exactly now while we were called from stop_device. +	 * This is very unlikely but can happen and is supported. 
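The new iwl_trans_pcie_rf_kill() defined just below turns this into a single entry point: the op_mode's hw_rf_kill hook returns a bool, and true means the transport should also stop the device. A hypothetical op_mode implementation makes the contract concrete (sketch, not from this patch):

    /* hypothetical op_mode hook: returning true asks the transport
     * to call stop_device() on our behalf */
    static bool example_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode,
                                           bool state)
    {
            return state;   /* stop the device whenever the radio is killed */
    }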
+	 */ +	hw_rfkill = iwl_is_rfkill_set(trans); +	if (hw_rfkill) +		set_bit(STATUS_RFKILL, &trans->status); +	else +		clear_bit(STATUS_RFKILL, &trans->status); +	if (hw_rfkill != was_hw_rfkill) +		iwl_trans_pcie_rf_kill(trans, hw_rfkill); +} + +void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) +{ +	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) +		iwl_trans_pcie_stop_device(trans);  }  static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) @@ -660,7 +982,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,  static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)  { -	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	bool hw_rfkill;  	int err; @@ -671,7 +992,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)  	}  	/* Reset the entire device */ -	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); +	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);  	usleep_range(10, 15); @@ -682,53 +1003,30 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)  	hw_rfkill = iwl_is_rfkill_set(trans);  	if (hw_rfkill) -		set_bit(STATUS_RFKILL, &trans_pcie->status); +		set_bit(STATUS_RFKILL, &trans->status);  	else -		clear_bit(STATUS_RFKILL, &trans_pcie->status); -	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); +		clear_bit(STATUS_RFKILL, &trans->status); +	iwl_trans_pcie_rf_kill(trans, hw_rfkill);  	return 0;  } -static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans, -				   bool op_mode_leaving) +static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); -	bool hw_rfkill; -	unsigned long flags; -	spin_lock_irqsave(&trans_pcie->irq_lock, flags); +	/* disable interrupts - don't enable HW RF kill interrupt */ +	spin_lock(&trans_pcie->irq_lock);  	iwl_disable_interrupts(trans); -	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); +	spin_unlock(&trans_pcie->irq_lock);  	iwl_pcie_apm_stop(trans); -	spin_lock_irqsave(&trans_pcie->irq_lock, flags); +	spin_lock(&trans_pcie->irq_lock);  	iwl_disable_interrupts(trans); -	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); +	spin_unlock(&trans_pcie->irq_lock);  	iwl_pcie_disable_ict(trans); - -	if (!op_mode_leaving) { -		/* -		 * Even if we stop the HW, we still want the RF kill -		 * interrupt -		 */ -		iwl_enable_rfkill_int(trans); - -		/* -		 * Check again since the RF kill state may have changed while -		 * all the interrupts were disabled, in this case we couldn't -		 * receive the RF kill interrupt and update the state in the -		 * op_mode. 
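A small change in iwl_trans_pcie_start_hw() above is easy to miss: the software reset moved from iwl_set_bit(), a read-modify-write, to a plain iwl_write32(). One plausible reading (an assumption; the diff gives no rationale) is that CSR_RESET holds no state worth preserving across SW_RESET, so the read adds nothing:

    /* old: read CSR_RESET, OR in the flag, write it back */
    /* iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); */

    /* new: write the flag directly (assumed: nothing in CSR_RESET
     * needs preserving across a software reset) */
    iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);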
-		 */ -		hw_rfkill = iwl_is_rfkill_set(trans); -		if (hw_rfkill) -			set_bit(STATUS_RFKILL, &trans_pcie->status); -		else -			clear_bit(STATUS_RFKILL, &trans_pcie->status); -		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); -	}  }  static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) @@ -761,6 +1059,12 @@ static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,  	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);  } +static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget) +{ +	WARN_ON(1); +	return 0; +} +  static void iwl_trans_pcie_configure(struct iwl_trans *trans,  				     const struct iwl_trans_config *trans_cfg)  { @@ -787,6 +1091,18 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,  	trans_pcie->command_names = trans_cfg->command_names;  	trans_pcie->bc_table_dword = trans_cfg->bc_table_dword; + +	/* Initialize NAPI here - it should be before registering to mac80211 +	 * in the opmode but after the HW struct is allocated. +	 * As this function may be called again in some corner cases don't +	 * do anything if NAPI was already initialized. +	 */ +	if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) { +		init_dummy_netdev(&trans_pcie->napi_dev); +		iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi, +				     &trans_pcie->napi_dev, +				     iwl_pcie_dummy_napi_poll, 64); +	}  }  void iwl_trans_pcie_free(struct iwl_trans *trans) @@ -807,17 +1123,18 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)  	pci_disable_device(trans_pcie->pci_dev);  	kmem_cache_destroy(trans->dev_cmd_pool); +	if (trans_pcie->napi.poll) +		netif_napi_del(&trans_pcie->napi); +  	kfree(trans);  }  static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)  { -	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); -  	if (state) -		set_bit(STATUS_TPOWER_PMI, &trans_pcie->status); +		set_bit(STATUS_TPOWER_PMI, &trans->status);  	else -		clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status); +		clear_bit(STATUS_TPOWER_PMI, &trans->status);  }  static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent, @@ -828,6 +1145,9 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,  	spin_lock_irqsave(&trans_pcie->reg_lock, *flags); +	if (trans_pcie->cmd_in_flight) +		goto out; +  	/* this bit wakes up the NIC */  	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,  				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); @@ -867,6 +1187,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,  		}  	} +out:  	/*  	 * Fool sparse by faking we release the lock - sparse will  	 * track nic_access anyway. @@ -888,6 +1209,9 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,  	 */  	__acquire(&trans_pcie->reg_lock); +	if (trans_pcie->cmd_in_flight) +		goto out; +  	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,  				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);  	/* @@ -897,6 +1221,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,  	 * scheduled on different CPUs (after we drop reg_lock).  	 
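For context on the cmd_in_flight early-outs added to the grab/release pair above: callers always bracket device-internal accesses with these two functions, so when a synchronous host command already holds the NIC awake the bracket becomes a cheap no-op. The canonical caller looks like this (sketch of the existing idiom, unchanged by the patch):

    unsigned long flags;

    if (iwl_trans_grab_nic_access(trans, false, &flags)) {
            /* NIC is awake: device-internal accesses are safe here,
             * e.g. reading back CSR_GP_CNTRL for inspection */
            u32 gp = iwl_read32(trans, CSR_GP_CNTRL);

            IWL_DEBUG_INFO(trans, "GP_CNTRL: 0x%08x\n", gp);
            iwl_trans_release_nic_access(trans, &flags);
    }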
*/  	mmiowb(); +out:  	spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);  } @@ -939,7 +1264,7 @@ static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,  #define IWL_FLUSH_WAIT_MS	2000 -static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans) +static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)  {  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);  	struct iwl_txq *txq; @@ -952,13 +1277,31 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)  	/* waiting for all the tx frames complete might take a while */  	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { +		u8 wr_ptr; +  		if (cnt == trans_pcie->cmd_queue)  			continue; +		if (!test_bit(cnt, trans_pcie->queue_used)) +			continue; +		if (!(BIT(cnt) & txq_bm)) +			continue; + +		IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);  		txq = &trans_pcie->txq[cnt];  		q = &txq->q; -		while (q->read_ptr != q->write_ptr && !time_after(jiffies, -		       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) +		wr_ptr = ACCESS_ONCE(q->write_ptr); + +		while (q->read_ptr != ACCESS_ONCE(q->write_ptr) && +		       !time_after(jiffies, +				   now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) { +			u8 write_ptr = ACCESS_ONCE(q->write_ptr); + +			if (WARN_ONCE(wr_ptr != write_ptr, +				      "WR pointer moved while flushing %d -> %d\n", +				      wr_ptr, write_ptr)) +				return -ETIMEDOUT;  			msleep(1); +		}  		if (q->read_ptr != q->write_ptr) {  			IWL_ERR(trans, @@ -966,6 +1309,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)  			ret = -ETIMEDOUT;  			break;  		} +		IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);  	}  	if (!ret) @@ -1000,8 +1344,8 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)  		IWL_ERR(trans,  			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",  			cnt, active ? 
"" : "in", fifo, tbl_dw, -			iwl_read_prph(trans, -				      SCD_QUEUE_RDPTR(cnt)) & (txq->q.n_bd - 1), +			iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) & +				(TFD_QUEUE_SIZE_MAX - 1),  			iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));  	} @@ -1045,6 +1389,7 @@ static const char *get_csr_string(int cmd)  	IWL_CMD(CSR_GIO_CHICKEN_BITS);  	IWL_CMD(CSR_ANA_PLL_CFG);  	IWL_CMD(CSR_HW_REV_WA_REG); +	IWL_CMD(CSR_MONITOR_STATUS_REG);  	IWL_CMD(CSR_DBG_HPET_MEM_REG);  	default:  		return "UNKNOWN"; @@ -1077,6 +1422,7 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans)  		CSR_DRAM_INT_TBL_REG,  		CSR_GIO_CHICKEN_BITS,  		CSR_ANA_PLL_CFG, +		CSR_MONITOR_STATUS_REG,  		CSR_HW_REV_WA_REG,  		CSR_DBG_HPET_MEM_REG  	}; @@ -1294,16 +1640,15 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,  {  	struct iwl_trans *trans = file->private_data;  	char *buf = NULL; -	int pos = 0; -	ssize_t ret = -EFAULT; - -	ret = pos = iwl_dump_fh(trans, &buf); -	if (buf) { -		ret = simple_read_from_buffer(user_buf, -					      count, ppos, buf, pos); -		kfree(buf); -	} +	ssize_t ret; +	ret = iwl_dump_fh(trans, &buf); +	if (ret < 0) +		return ret; +	if (!buf) +		return -EINVAL; +	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); +	kfree(buf);  	return ret;  } @@ -1331,6 +1676,61 @@ err:  	IWL_ERR(trans, "failed to create the trans debugfs entry\n");  	return -ENOMEM;  } + +static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd) +{ +	u32 cmdlen = 0; +	int i; + +	for (i = 0; i < IWL_NUM_OF_TBS; i++) +		cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i); + +	return cmdlen; +} + +static u32 iwl_trans_pcie_dump_data(struct iwl_trans *trans, +				    void *buf, u32 buflen) +{ +	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); +	struct iwl_fw_error_dump_data *data; +	struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue]; +	struct iwl_fw_error_dump_txcmd *txcmd; +	u32 len; +	int i, ptr; + +	if (!buf) +		return sizeof(*data) + +		       cmdq->q.n_window * (sizeof(*txcmd) + +					   TFD_MAX_PAYLOAD_SIZE); + +	len = 0; +	data = buf; +	data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD); +	txcmd = (void *)data->data; +	spin_lock_bh(&cmdq->lock); +	ptr = cmdq->q.write_ptr; +	for (i = 0; i < cmdq->q.n_window; i++) { +		u8 idx = get_cmd_index(&cmdq->q, ptr); +		u32 caplen, cmdlen; + +		cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]); +		caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen); + +		if (cmdlen) { +			len += sizeof(*txcmd) + caplen; +			txcmd->cmdlen = cpu_to_le32(cmdlen); +			txcmd->caplen = cpu_to_le32(caplen); +			memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen); +			txcmd = (void *)((u8 *)txcmd->data + caplen); +		} + +		ptr = iwl_queue_dec_wrap(ptr); +	} +	spin_unlock_bh(&cmdq->lock); + +	data->len = cpu_to_le32(len); +	return sizeof(*data) + len; +}  #else  static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,  					 struct dentry *dir) @@ -1341,7 +1741,7 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,  static const struct iwl_trans_ops trans_ops_pcie = {  	.start_hw = iwl_trans_pcie_start_hw, -	.stop_hw = iwl_trans_pcie_stop_hw, +	.op_mode_leave = iwl_trans_pcie_op_mode_leave,  	.fw_alive = iwl_trans_pcie_fw_alive,  	.start_fw = iwl_trans_pcie_start_fw,  	.stop_device = iwl_trans_pcie_stop_device, @@ -1373,6 +1773,10 @@ static const struct iwl_trans_ops trans_ops_pcie = {  	.grab_nic_access = iwl_trans_pcie_grab_nic_access,  	.release_nic_access = iwl_trans_pcie_release_nic_access,  	.set_bits_mask = iwl_trans_pcie_set_bits_mask, + +#ifdef 
CONFIG_IWLWIFI_DEBUGFS +	.dump_data = iwl_trans_pcie_dump_data, +#endif  };  struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, @@ -1401,6 +1805,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,  	spin_lock_init(&trans_pcie->reg_lock);  	init_waitqueue_head(&trans_pcie->ucode_write_waitq); +	err = pci_enable_device(pdev); +	if (err) +		goto out_no_pci; +  	if (!cfg->base_params->pcie_l1_allowed) {  		/*  		 * W/A - seems to solve weird behavior. We need to remove this @@ -1412,10 +1820,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,  				       PCIE_LINK_STATE_CLKPM);  	} -	err = pci_enable_device(pdev); -	if (err) -		goto out_no_pci; -  	pci_set_master(pdev);  	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); @@ -1450,6 +1854,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,  	 * PCI Tx retries from interfering with C3 CPU state */  	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); +	trans->dev = &pdev->dev; +	trans_pcie->pci_dev = pdev; +	iwl_disable_interrupts(trans); +  	err = pci_enable_msi(pdev);  	if (err) {  		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err); @@ -1461,8 +1869,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,  		}  	} -	trans->dev = &pdev->dev; -	trans_pcie->pci_dev = pdev;  	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);  	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;  	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), @@ -1488,12 +1894,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,  		goto out_pci_disable_msi;  	} -	trans_pcie->inta_mask = CSR_INI_SET_MASK; -  	if (iwl_pcie_alloc_ict(trans))  		goto out_free_cmd_pool; -	err = request_threaded_irq(pdev->irq, iwl_pcie_isr_ict, +	err = request_threaded_irq(pdev->irq, iwl_pcie_isr,  				   iwl_pcie_irq_handler,  				   IRQF_SHARED, DRV_NAME, trans);  	if (err) { @@ -1501,6 +1905,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,  		goto out_free_ict;  	} +	trans_pcie->inta_mask = CSR_INI_SET_MASK; +  	return trans;  out_free_ict:  | 
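Finally, the dump_data op registered above (debugfs builds only) uses a two-pass contract: called with a NULL buffer, iwl_trans_pcie_dump_data() returns the worst-case size of the command-queue TXCMD records, and a second call fills the caller's buffer. A hypothetical caller, invoking the static helper directly for illustration:

    u32 len = iwl_trans_pcie_dump_data(trans, NULL, 0);
    void *buf = kzalloc(len, GFP_KERNEL);

    if (buf) {
            len = iwl_trans_pcie_dump_data(trans, buf, len);
            /* hand len bytes at buf to the error-dump consumer */
    }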
