Diffstat (limited to 'drivers/net/wireless/iwlwifi')
51 files changed, 6561 insertions, 5071 deletions
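Editor's note: the bulk of this patch removes the per-device iwl_ops indirection (.lib / .hcmd / .utils / .nic) in favour of a single iwl_lib_ops pointer in each iwl_cfg, folds the old hcmd/utils callbacks (gain computation, chain-noise reset, RSSI calculation, PAN parameters) into the iwl-agn code, and routes host commands through a new transport layer (iwl-pci.c, iwl-trans.c, iwl-trans-rx-pcie.c, iwl-trans-tx-pcie.c), so call sites change from iwl_send_cmd_sync(priv, &hcmd) to trans_send_cmd(&priv->trans, &hcmd). The sketch below is illustrative only: the structure layouts and the inline wrapper are simplified assumptions inferred from the call sites visible in this diff, not the definitions actually added in iwl-trans.h.

	/*
	 * Simplified sketch of the call-pattern change introduced by this
	 * patch (assumed layouts, not the kernel's exact structures).
	 */
	struct iwl_priv;
	struct iwl_host_cmd;

	struct iwl_trans_ops {
		/* synchronous or asynchronous host command, per cmd->flags */
		int (*send_cmd)(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
	};

	struct iwl_trans {
		const struct iwl_trans_ops *ops;
		struct iwl_priv *priv;	/* assumed back-pointer for the sketch */
	};

	static inline int trans_send_cmd(struct iwl_trans *trans,
					 struct iwl_host_cmd *cmd)
	{
		return trans->ops->send_cmd(trans->priv, cmd);
	}

	/*
	 * Old call site:   ret = iwl_send_cmd_sync(priv, &hcmd);
	 * New call site:   ret = trans_send_cmd(&priv->trans, &hcmd);
	 */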
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile index 822660483f9..48ab9142af3 100644 --- a/drivers/net/wireless/iwlwifi/Makefile +++ b/drivers/net/wireless/iwlwifi/Makefile @@ -5,14 +5,16 @@ iwlagn-objs += iwl-agn-ucode.o iwl-agn-tx.o iwlagn-objs += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o iwlagn-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-eeprom.o -iwlagn-objs += iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o -iwlagn-objs += iwl-rx.o iwl-tx.o iwl-sta.o +iwlagn-objs += iwl-core.o iwl-eeprom.o iwl-power.o +iwlagn-objs += iwl-rx.o iwl-sta.o iwlagn-objs += iwl-scan.o iwl-led.o -iwlagn-objs += iwl-agn-rxon.o iwl-agn-hcmd.o iwl-agn-ict.o +iwlagn-objs += iwl-agn-rxon.o iwlagn-objs += iwl-5000.o iwlagn-objs += iwl-6000.o iwlagn-objs += iwl-1000.o iwlagn-objs += iwl-2000.o +iwlagn-objs += iwl-pci.o +iwlagn-objs += iwl-trans.o iwl-trans-rx-pcie.o iwl-trans-tx-pcie.o iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o iwlagn-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c index 2a88e73bb39..01b49eb8c8e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c +++ b/drivers/net/wireless/iwlwifi/iwl-1000.c @@ -27,8 +27,6 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> -#include <linux/pci.h> -#include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/skbuff.h> #include <linux/netdevice.h> @@ -127,7 +125,6 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv) iwlagn_mod_params.num_of_queues; priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues; - priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; priv->hw_params.scd_bc_tbls_size = priv->cfg->base_params->num_of_queues * sizeof(struct iwlagn_scd_bc_tbl); @@ -140,7 +137,6 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv) priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ); - priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); if (priv->cfg->rx_with_siso_diversity) @@ -172,15 +168,7 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv) static struct iwl_lib_ops iwl1000_lib = { .set_hw_params = iwl1000_hw_set_hw_params, - .rx_handler_setup = iwlagn_rx_handler_setup, - .setup_deferred_work = iwlagn_setup_deferred_work, - .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr, - .send_tx_power = iwlagn_send_tx_power, - .update_chain_flags = iwl_update_chain_flags, - .apm_ops = { - .init = iwl_apm_init, - .config = iwl1000_nic_config, - }, + .nic_config = iwl1000_nic_config, .eeprom_ops = { .regulatory_bands = { EEPROM_REG_BAND_1_CHANNELS, @@ -191,19 +179,8 @@ static struct iwl_lib_ops iwl1000_lib = { EEPROM_REG_BAND_24_HT40_CHANNELS, EEPROM_REGULATORY_BAND_NO_HT40, }, - .query_addr = iwlagn_eeprom_query_addr, }, - .temp_ops = { - .temperature = iwlagn_temperature, - }, - .txfifo_flush = iwlagn_txfifo_flush, - .dev_txfifo_flush = iwlagn_dev_txfifo_flush, -}; - -static const struct iwl_ops iwl1000_ops = { - .lib = &iwl1000_lib, - .hcmd = &iwlagn_hcmd, - .utils = &iwlagn_hcmd_utils, + .temperature = iwlagn_temperature, }; static struct iwl_base_params iwl1000_base_params = { @@ -224,6 +201,7 @@ static struct iwl_base_params iwl1000_base_params = { static struct iwl_ht_params iwl1000_ht_params = { .ht_greenfield_support = true, .use_rts_for_aggregation = true, /* use rts/cts protection */ + .smps_mode = IEEE80211_SMPS_STATIC, }; #define 
IWL_DEVICE_1000 \ @@ -232,7 +210,7 @@ static struct iwl_ht_params iwl1000_ht_params = { .ucode_api_min = IWL1000_UCODE_API_MIN, \ .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ - .ops = &iwl1000_ops, \ + .lib = &iwl1000_lib, \ .base_params = &iwl1000_base_params, \ .led_mode = IWL_LED_BLINK @@ -253,7 +231,7 @@ struct iwl_cfg iwl1000_bg_cfg = { .ucode_api_min = IWL100_UCODE_API_MIN, \ .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ - .ops = &iwl1000_ops, \ + .lib = &iwl1000_lib, \ .base_params = &iwl1000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ .rx_with_siso_diversity = true diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c index 3df76f53a41..0e13f0bb2e1 100644 --- a/drivers/net/wireless/iwlwifi/iwl-2000.c +++ b/drivers/net/wireless/iwlwifi/iwl-2000.c @@ -27,8 +27,6 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> -#include <linux/pci.h> -#include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/skbuff.h> #include <linux/netdevice.h> @@ -52,11 +50,13 @@ #define IWL2030_UCODE_API_MAX 5 #define IWL2000_UCODE_API_MAX 5 #define IWL105_UCODE_API_MAX 5 +#define IWL135_UCODE_API_MAX 5 /* Lowest firmware API version supported */ #define IWL2030_UCODE_API_MIN 5 #define IWL2000_UCODE_API_MIN 5 #define IWL105_UCODE_API_MIN 5 +#define IWL135_UCODE_API_MIN 5 #define IWL2030_FW_PRE "iwlwifi-2030-" #define IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE __stringify(api) ".ucode" @@ -67,6 +67,9 @@ #define IWL105_FW_PRE "iwlwifi-105-" #define IWL105_MODULE_FIRMWARE(api) IWL105_FW_PRE __stringify(api) ".ucode" +#define IWL135_FW_PRE "iwlwifi-135-" +#define IWL135_MODULE_FIRMWARE(api) IWL135_FW_PRE #api ".ucode" + static void iwl2000_set_ct_threshold(struct iwl_priv *priv) { /* want Celsius */ @@ -77,28 +80,11 @@ static void iwl2000_set_ct_threshold(struct iwl_priv *priv) /* NIC configuration for 2000 series */ static void iwl2000_nic_config(struct iwl_priv *priv) { - u16 radio_cfg; - - radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); - - /* write radio config values to register */ - if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) - iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, - EEPROM_RF_CFG_TYPE_MSK(radio_cfg) | - EEPROM_RF_CFG_STEP_MSK(radio_cfg) | - EEPROM_RF_CFG_DASH_MSK(radio_cfg)); - - /* set CSR_HW_CONFIG_REG for uCode use */ - iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | - CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); + iwl_rf_config(priv); if (priv->cfg->iq_invert) iwl_set_bit(priv, CSR_GP_DRIVER_REG, CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER); - - if (priv->cfg->disable_otp_refresh) - iwl_write_prph(priv, APMG_ANALOG_SVR_REG, 0x80000010); } static struct iwl_sensitivity_ranges iwl2000_sensitivity = { @@ -134,7 +120,6 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv) iwlagn_mod_params.num_of_queues; priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues; - priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; priv->hw_params.scd_bc_tbls_size = priv->cfg->base_params->num_of_queues * sizeof(struct iwlagn_scd_bc_tbl); @@ -147,7 +132,6 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv) priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ); - priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); if (priv->cfg->rx_with_siso_diversity) @@ -169,7 +153,7 
@@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv) BIT(IWL_CALIB_TX_IQ) | BIT(IWL_CALIB_BASE_BAND); if (priv->cfg->need_dc_calib) - priv->hw_params.calib_rt_cfg |= BIT(IWL_CALIB_CFG_DC_IDX); + priv->hw_params.calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX; if (priv->cfg->need_temp_offset_calib) priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET); @@ -180,16 +164,7 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv) static struct iwl_lib_ops iwl2000_lib = { .set_hw_params = iwl2000_hw_set_hw_params, - .rx_handler_setup = iwlagn_rx_handler_setup, - .setup_deferred_work = iwlagn_bt_setup_deferred_work, - .cancel_deferred_work = iwlagn_bt_cancel_deferred_work, - .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr, - .send_tx_power = iwlagn_send_tx_power, - .update_chain_flags = iwl_update_chain_flags, - .apm_ops = { - .init = iwl_apm_init, - .config = iwl2000_nic_config, - }, + .nic_config = iwl2000_nic_config, .eeprom_ops = { .regulatory_bands = { EEPROM_REG_BAND_1_CHANNELS, @@ -200,38 +175,30 @@ static struct iwl_lib_ops iwl2000_lib = { EEPROM_6000_REG_BAND_24_HT40_CHANNELS, EEPROM_REGULATORY_BAND_NO_HT40, }, - .query_addr = iwlagn_eeprom_query_addr, .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower, }, - .temp_ops = { - .temperature = iwlagn_temperature, - }, - .txfifo_flush = iwlagn_txfifo_flush, - .dev_txfifo_flush = iwlagn_dev_txfifo_flush, + .temperature = iwlagn_temperature, }; -static const struct iwl_ops iwl2000_ops = { - .lib = &iwl2000_lib, - .hcmd = &iwlagn_hcmd, - .utils = &iwlagn_hcmd_utils, -}; - -static const struct iwl_ops iwl2030_ops = { - .lib = &iwl2000_lib, - .hcmd = &iwlagn_bt_hcmd, - .utils = &iwlagn_hcmd_utils, -}; - -static const struct iwl_ops iwl105_ops = { - .lib = &iwl2000_lib, - .hcmd = &iwlagn_hcmd, - .utils = &iwlagn_hcmd_utils, -}; - -static const struct iwl_ops iwl135_ops = { - .lib = &iwl2000_lib, - .hcmd = &iwlagn_bt_hcmd, - .utils = &iwlagn_hcmd_utils, +static struct iwl_lib_ops iwl2030_lib = { + .set_hw_params = iwl2000_hw_set_hw_params, + .bt_rx_handler_setup = iwlagn_bt_rx_handler_setup, + .bt_setup_deferred_work = iwlagn_bt_setup_deferred_work, + .cancel_deferred_work = iwlagn_bt_cancel_deferred_work, + .nic_config = iwl2000_nic_config, + .eeprom_ops = { + .regulatory_bands = { + EEPROM_REG_BAND_1_CHANNELS, + EEPROM_REG_BAND_2_CHANNELS, + EEPROM_REG_BAND_3_CHANNELS, + EEPROM_REG_BAND_4_CHANNELS, + EEPROM_REG_BAND_5_CHANNELS, + EEPROM_6000_REG_BAND_24_HT40_CHANNELS, + EEPROM_REGULATORY_BAND_NO_HT40, + }, + .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower, + }, + .temperature = iwlagn_temperature, }; static struct iwl_base_params iwl2000_base_params = { @@ -292,13 +259,12 @@ static struct iwl_bt_params iwl2030_bt_params = { .ucode_api_min = IWL2000_UCODE_API_MIN, \ .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ - .ops = &iwl2000_ops, \ + .lib = &iwl2000_lib, \ .base_params = &iwl2000_base_params, \ .need_dc_calib = true, \ .need_temp_offset_calib = true, \ .led_mode = IWL_LED_RF_STATE, \ - .iq_invert = true, \ - .disable_otp_refresh = true \ + .iq_invert = true \ struct iwl_cfg iwl2000_2bgn_cfg = { .name = "2000 Series 2x2 BGN", @@ -317,7 +283,7 @@ struct iwl_cfg iwl2000_2bg_cfg = { .ucode_api_min = IWL2030_UCODE_API_MIN, \ .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ - .ops = &iwl2030_ops, \ + .lib = &iwl2030_lib, \ .base_params = &iwl2030_base_params, \ .bt_params = &iwl2030_bt_params, \ .need_dc_calib = true, \ 
@@ -343,13 +309,14 @@ struct iwl_cfg iwl2030_2bg_cfg = { .ucode_api_min = IWL105_UCODE_API_MIN, \ .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ - .ops = &iwl105_ops, \ + .lib = &iwl2000_lib, \ .base_params = &iwl2000_base_params, \ .need_dc_calib = true, \ .need_temp_offset_calib = true, \ .led_mode = IWL_LED_RF_STATE, \ .adv_pm = true, \ - .rx_with_siso_diversity = true \ + .rx_with_siso_diversity = true, \ + .iq_invert = true \ struct iwl_cfg iwl105_bg_cfg = { .name = "105 Series 1x1 BG", @@ -363,27 +330,28 @@ struct iwl_cfg iwl105_bgn_cfg = { }; #define IWL_DEVICE_135 \ - .fw_name_pre = IWL105_FW_PRE, \ - .ucode_api_max = IWL105_UCODE_API_MAX, \ - .ucode_api_min = IWL105_UCODE_API_MIN, \ + .fw_name_pre = IWL135_FW_PRE, \ + .ucode_api_max = IWL135_UCODE_API_MAX, \ + .ucode_api_min = IWL135_UCODE_API_MIN, \ .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ - .ops = &iwl135_ops, \ + .lib = &iwl2030_lib, \ .base_params = &iwl2030_base_params, \ .bt_params = &iwl2030_bt_params, \ .need_dc_calib = true, \ .need_temp_offset_calib = true, \ .led_mode = IWL_LED_RF_STATE, \ .adv_pm = true, \ - .rx_with_siso_diversity = true \ + .rx_with_siso_diversity = true, \ + .iq_invert = true \ struct iwl_cfg iwl135_bg_cfg = { - .name = "105 Series 1x1 BG/BT", + .name = "135 Series 1x1 BG/BT", IWL_DEVICE_135, }; struct iwl_cfg iwl135_bgn_cfg = { - .name = "105 Series 1x1 BGN/BT", + .name = "135 Series 1x1 BGN/BT", IWL_DEVICE_135, .ht_params = &iwl2000_ht_params, }; @@ -391,3 +359,4 @@ struct iwl_cfg iwl135_bgn_cfg = { MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX)); MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_MAX)); diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h index 05ad47628b6..f9630a3c79f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h +++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h @@ -75,7 +75,7 @@ static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv) { u16 temperature, voltage; __le16 *temp_calib = - (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_TEMPERATURE); + (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_TEMPERATURE); temperature = le16_to_cpu(temp_calib[0]); voltage = le16_to_cpu(temp_calib[1]); diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index e816c27db79..3eeb12ebe6e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c @@ -27,8 +27,6 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> -#include <linux/pci.h> -#include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/skbuff.h> @@ -48,6 +46,7 @@ #include "iwl-agn.h" #include "iwl-agn-hw.h" #include "iwl-5000-hw.h" +#include "iwl-trans.h" /* Highest firmware API version supported */ #define IWL5000_UCODE_API_MAX 5 @@ -67,23 +66,10 @@ static void iwl5000_nic_config(struct iwl_priv *priv) { unsigned long flags; - u16 radio_cfg; - spin_lock_irqsave(&priv->lock, flags); - - radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); - - /* write radio config values to register */ - if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) < EEPROM_RF_CONFIG_TYPE_MAX) - iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, - EEPROM_RF_CFG_TYPE_MSK(radio_cfg) | - EEPROM_RF_CFG_STEP_MSK(radio_cfg) | - 
EEPROM_RF_CFG_DASH_MSK(radio_cfg)); + iwl_rf_config(priv); - /* set CSR_HW_CONFIG_REG for uCode use */ - iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | - CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); + spin_lock_irqsave(&priv->lock, flags); /* W/A : NIC is stuck in a reset state after Early PCIe power off * (PCIe power is lost before PERST# is asserted), @@ -171,7 +157,6 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv) iwlagn_mod_params.num_of_queues; priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues; - priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; priv->hw_params.scd_bc_tbls_size = priv->cfg->base_params->num_of_queues * sizeof(struct iwlagn_scd_bc_tbl); @@ -184,7 +169,6 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv) priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ); - priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant); @@ -216,7 +200,6 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv) iwlagn_mod_params.num_of_queues; priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues; - priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; priv->hw_params.scd_bc_tbls_size = priv->cfg->base_params->num_of_queues * sizeof(struct iwlagn_scd_bc_tbl); @@ -229,7 +212,6 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv) priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ); - priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant); @@ -333,21 +315,13 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv, return -EFAULT; } - return iwl_send_cmd_sync(priv, &hcmd); + return trans_send_cmd(&priv->trans, &hcmd); } static struct iwl_lib_ops iwl5000_lib = { .set_hw_params = iwl5000_hw_set_hw_params, - .rx_handler_setup = iwlagn_rx_handler_setup, - .setup_deferred_work = iwlagn_setup_deferred_work, - .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr, - .send_tx_power = iwlagn_send_tx_power, - .update_chain_flags = iwl_update_chain_flags, .set_channel_switch = iwl5000_hw_channel_switch, - .apm_ops = { - .init = iwl_apm_init, - .config = iwl5000_nic_config, - }, + .nic_config = iwl5000_nic_config, .eeprom_ops = { .regulatory_bands = { EEPROM_REG_BAND_1_CHANNELS, @@ -358,27 +332,14 @@ static struct iwl_lib_ops iwl5000_lib = { EEPROM_REG_BAND_24_HT40_CHANNELS, EEPROM_REG_BAND_52_HT40_CHANNELS }, - .query_addr = iwlagn_eeprom_query_addr, }, - .temp_ops = { - .temperature = iwlagn_temperature, - }, - .txfifo_flush = iwlagn_txfifo_flush, - .dev_txfifo_flush = iwlagn_dev_txfifo_flush, + .temperature = iwlagn_temperature, }; static struct iwl_lib_ops iwl5150_lib = { .set_hw_params = iwl5150_hw_set_hw_params, - .rx_handler_setup = iwlagn_rx_handler_setup, - .setup_deferred_work = iwlagn_setup_deferred_work, - .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr, - .send_tx_power = iwlagn_send_tx_power, - .update_chain_flags = iwl_update_chain_flags, .set_channel_switch = iwl5000_hw_channel_switch, - .apm_ops = { - .init = iwl_apm_init, - .config = iwl5000_nic_config, - }, + .nic_config = iwl5000_nic_config, .eeprom_ops = { .regulatory_bands = { EEPROM_REG_BAND_1_CHANNELS, @@ -389,25 +350,8 @@ static struct iwl_lib_ops iwl5150_lib = { EEPROM_REG_BAND_24_HT40_CHANNELS, 
EEPROM_REG_BAND_52_HT40_CHANNELS }, - .query_addr = iwlagn_eeprom_query_addr, }, - .temp_ops = { - .temperature = iwl5150_temperature, - }, - .txfifo_flush = iwlagn_txfifo_flush, - .dev_txfifo_flush = iwlagn_dev_txfifo_flush, -}; - -static const struct iwl_ops iwl5000_ops = { - .lib = &iwl5000_lib, - .hcmd = &iwlagn_hcmd, - .utils = &iwlagn_hcmd_utils, -}; - -static const struct iwl_ops iwl5150_ops = { - .lib = &iwl5150_lib, - .hcmd = &iwlagn_hcmd, - .utils = &iwlagn_hcmd_utils, + .temperature = iwl5150_temperature, }; static struct iwl_base_params iwl5000_base_params = { @@ -432,7 +376,7 @@ static struct iwl_ht_params iwl5000_ht_params = { .ucode_api_min = IWL5000_UCODE_API_MIN, \ .eeprom_ver = EEPROM_5000_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, \ - .ops = &iwl5000_ops, \ + .lib = &iwl5000_lib, \ .base_params = &iwl5000_base_params, \ .led_mode = IWL_LED_BLINK @@ -475,7 +419,7 @@ struct iwl_cfg iwl5350_agn_cfg = { .ucode_api_min = IWL5000_UCODE_API_MIN, .eeprom_ver = EEPROM_5050_EEPROM_VERSION, .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, - .ops = &iwl5000_ops, + .lib = &iwl5000_lib, .base_params = &iwl5000_base_params, .ht_params = &iwl5000_ht_params, .led_mode = IWL_LED_BLINK, @@ -488,7 +432,7 @@ struct iwl_cfg iwl5350_agn_cfg = { .ucode_api_min = IWL5150_UCODE_API_MIN, \ .eeprom_ver = EEPROM_5050_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \ - .ops = &iwl5150_ops, \ + .lib = &iwl5150_lib, \ .base_params = &iwl5000_base_params, \ .need_dc_calib = true, \ .led_mode = IWL_LED_BLINK, \ diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index 5b150bc70b0..973d1972e8c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c @@ -27,8 +27,6 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> -#include <linux/pci.h> -#include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/skbuff.h> #include <linux/netdevice.h> @@ -47,6 +45,7 @@ #include "iwl-helpers.h" #include "iwl-agn-hw.h" #include "iwl-6000-hw.h" +#include "iwl-trans.h" /* Highest firmware API version supported */ #define IWL6000_UCODE_API_MAX 4 @@ -98,21 +97,7 @@ static void iwl6150_additional_nic_config(struct iwl_priv *priv) /* NIC configuration for 6000 series */ static void iwl6000_nic_config(struct iwl_priv *priv) { - u16 radio_cfg; - - radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); - - /* write radio config values to register */ - if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) - iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, - EEPROM_RF_CFG_TYPE_MSK(radio_cfg) | - EEPROM_RF_CFG_STEP_MSK(radio_cfg) | - EEPROM_RF_CFG_DASH_MSK(radio_cfg)); - - /* set CSR_HW_CONFIG_REG for uCode use */ - iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | - CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); + iwl_rf_config(priv); /* no locking required for register write */ if (priv->cfg->pa_type == IWL_PA_INTERNAL) { @@ -121,10 +106,8 @@ static void iwl6000_nic_config(struct iwl_priv *priv) CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA); } /* do additional nic configuration if needed */ - if (priv->cfg->ops->nic && - priv->cfg->ops->nic->additional_nic_config) { - priv->cfg->ops->nic->additional_nic_config(priv); - } + if (priv->cfg->additional_nic_config) + priv->cfg->additional_nic_config(priv); } static struct iwl_sensitivity_ranges iwl6000_sensitivity = { @@ -160,7 +143,6 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv) 
iwlagn_mod_params.num_of_queues; priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues; - priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; priv->hw_params.scd_bc_tbls_size = priv->cfg->base_params->num_of_queues * sizeof(struct iwlagn_scd_bc_tbl); @@ -173,7 +155,6 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv) priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ); - priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); if (priv->cfg->rx_with_siso_diversity) @@ -195,7 +176,7 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv) BIT(IWL_CALIB_TX_IQ) | BIT(IWL_CALIB_BASE_BAND); if (priv->cfg->need_dc_calib) - priv->hw_params.calib_rt_cfg |= BIT(IWL_CALIB_CFG_DC_IDX); + priv->hw_params.calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX; if (priv->cfg->need_temp_offset_calib) priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET); @@ -272,21 +253,13 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv, return -EFAULT; } - return iwl_send_cmd_sync(priv, &hcmd); + return trans_send_cmd(&priv->trans, &hcmd); } static struct iwl_lib_ops iwl6000_lib = { .set_hw_params = iwl6000_hw_set_hw_params, - .rx_handler_setup = iwlagn_rx_handler_setup, - .setup_deferred_work = iwlagn_setup_deferred_work, - .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr, - .send_tx_power = iwlagn_send_tx_power, - .update_chain_flags = iwl_update_chain_flags, .set_channel_switch = iwl6000_hw_channel_switch, - .apm_ops = { - .init = iwl_apm_init, - .config = iwl6000_nic_config, - }, + .nic_config = iwl6000_nic_config, .eeprom_ops = { .regulatory_bands = { EEPROM_REG_BAND_1_CHANNELS, @@ -297,29 +270,18 @@ static struct iwl_lib_ops iwl6000_lib = { EEPROM_6000_REG_BAND_24_HT40_CHANNELS, EEPROM_REG_BAND_52_HT40_CHANNELS }, - .query_addr = iwlagn_eeprom_query_addr, .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower, }, - .temp_ops = { - .temperature = iwlagn_temperature, - }, - .txfifo_flush = iwlagn_txfifo_flush, - .dev_txfifo_flush = iwlagn_dev_txfifo_flush, + .temperature = iwlagn_temperature, }; static struct iwl_lib_ops iwl6030_lib = { .set_hw_params = iwl6000_hw_set_hw_params, - .rx_handler_setup = iwlagn_bt_rx_handler_setup, - .setup_deferred_work = iwlagn_bt_setup_deferred_work, + .bt_rx_handler_setup = iwlagn_bt_rx_handler_setup, + .bt_setup_deferred_work = iwlagn_bt_setup_deferred_work, .cancel_deferred_work = iwlagn_bt_cancel_deferred_work, - .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr, - .send_tx_power = iwlagn_send_tx_power, - .update_chain_flags = iwl_update_chain_flags, .set_channel_switch = iwl6000_hw_channel_switch, - .apm_ops = { - .init = iwl_apm_init, - .config = iwl6000_nic_config, - }, + .nic_config = iwl6000_nic_config, .eeprom_ops = { .regulatory_bands = { EEPROM_REG_BAND_1_CHANNELS, @@ -330,48 +292,9 @@ static struct iwl_lib_ops iwl6030_lib = { EEPROM_6000_REG_BAND_24_HT40_CHANNELS, EEPROM_REG_BAND_52_HT40_CHANNELS }, - .query_addr = iwlagn_eeprom_query_addr, .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower, }, - .temp_ops = { - .temperature = iwlagn_temperature, - }, - .txfifo_flush = iwlagn_txfifo_flush, - .dev_txfifo_flush = iwlagn_dev_txfifo_flush, -}; - -static struct iwl_nic_ops iwl6050_nic_ops = { - .additional_nic_config = &iwl6050_additional_nic_config, -}; - -static struct iwl_nic_ops iwl6150_nic_ops = { - .additional_nic_config = &iwl6150_additional_nic_config, -}; - -static const struct iwl_ops iwl6000_ops = { - .lib = 
&iwl6000_lib, - .hcmd = &iwlagn_hcmd, - .utils = &iwlagn_hcmd_utils, -}; - -static const struct iwl_ops iwl6050_ops = { - .lib = &iwl6000_lib, - .hcmd = &iwlagn_hcmd, - .utils = &iwlagn_hcmd_utils, - .nic = &iwl6050_nic_ops, -}; - -static const struct iwl_ops iwl6150_ops = { - .lib = &iwl6000_lib, - .hcmd = &iwlagn_hcmd, - .utils = &iwlagn_hcmd_utils, - .nic = &iwl6150_nic_ops, -}; - -static const struct iwl_ops iwl6030_ops = { - .lib = &iwl6030_lib, - .hcmd = &iwlagn_bt_hcmd, - .utils = &iwlagn_hcmd_utils, + .temperature = iwlagn_temperature, }; static struct iwl_base_params iwl6000_base_params = { @@ -447,7 +370,7 @@ static struct iwl_bt_params iwl6000_bt_params = { .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ .eeprom_ver = EEPROM_6005_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \ - .ops = &iwl6000_ops, \ + .lib = &iwl6000_lib, \ .base_params = &iwl6000_g2_base_params, \ .need_dc_calib = true, \ .need_temp_offset_calib = true, \ @@ -475,7 +398,7 @@ struct iwl_cfg iwl6005_2bg_cfg = { .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ - .ops = &iwl6030_ops, \ + .lib = &iwl6030_lib, \ .base_params = &iwl6000_g2_base_params, \ .bt_params = &iwl6000_bt_params, \ .need_dc_calib = true, \ @@ -556,7 +479,7 @@ struct iwl_cfg iwl130_bg_cfg = { .valid_rx_ant = ANT_BC, /* .cfg overwrite */ \ .eeprom_ver = EEPROM_6000_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, \ - .ops = &iwl6000_ops, \ + .lib = &iwl6000_lib, \ .base_params = &iwl6000_base_params, \ .pa_type = IWL_PA_INTERNAL, \ .led_mode = IWL_LED_BLINK @@ -583,7 +506,8 @@ struct iwl_cfg iwl6000i_2bg_cfg = { .ucode_api_min = IWL6050_UCODE_API_MIN, \ .valid_tx_ant = ANT_AB, /* .cfg overwrite */ \ .valid_rx_ant = ANT_AB, /* .cfg overwrite */ \ - .ops = &iwl6050_ops, \ + .lib = &iwl6000_lib, \ + .additional_nic_config = iwl6050_additional_nic_config, \ .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \ .base_params = &iwl6050_base_params, \ @@ -606,7 +530,8 @@ struct iwl_cfg iwl6050_2abg_cfg = { .fw_name_pre = IWL6050_FW_PRE, \ .ucode_api_max = IWL6050_UCODE_API_MAX, \ .ucode_api_min = IWL6050_UCODE_API_MIN, \ - .ops = &iwl6150_ops, \ + .lib = &iwl6000_lib, \ + .additional_nic_config = iwl6150_additional_nic_config, \ .eeprom_ver = EEPROM_6150_EEPROM_VERSION, \ .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \ .base_params = &iwl6050_base_params, \ @@ -632,7 +557,7 @@ struct iwl_cfg iwl6000_3agn_cfg = { .ucode_api_min = IWL6000_UCODE_API_MIN, .eeprom_ver = EEPROM_6000_EEPROM_VERSION, .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, - .ops = &iwl6000_ops, + .lib = &iwl6000_lib, .base_params = &iwl6000_base_params, .ht_params = &iwl6000_ht_params, .need_dc_calib = true, diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c index c9255def108..72d6297602b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c @@ -66,6 +66,8 @@ #include "iwl-dev.h" #include "iwl-core.h" #include "iwl-agn-calib.h" +#include "iwl-trans.h" +#include "iwl-agn.h" /***************************************************************************** * INIT calibrations framework @@ -87,6 +89,7 @@ int iwl_send_calib_results(struct iwl_priv *priv) struct iwl_host_cmd hcmd = { .id = REPLY_PHY_CALIBRATION_CMD, + .flags = CMD_SYNC, }; for (i = 0; i < IWL_CALIB_MAX; i++) { @@ -95,7 +98,7 @@ int 
iwl_send_calib_results(struct iwl_priv *priv) hcmd.len[0] = priv->calib_results[i].buf_len; hcmd.data[0] = priv->calib_results[i].buf; hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; - ret = iwl_send_cmd_sync(priv, &hcmd); + ret = trans_send_cmd(&priv->trans, &hcmd); if (ret) { IWL_ERR(priv, "Error %d iteration %d\n", ret, i); @@ -481,7 +484,7 @@ static int iwl_sensitivity_write(struct iwl_priv *priv) memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]), sizeof(u16)*HD_TABLE_SIZE); - return iwl_send_cmd(priv, &cmd_out); + return trans_send_cmd(&priv->trans, &cmd_out); } /* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */ @@ -545,7 +548,7 @@ static int iwl_enhance_sensitivity_write(struct iwl_priv *priv) &(cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX]), sizeof(u16)*ENHANCE_HD_TABLE_ENTRIES); - return iwl_send_cmd(priv, &cmd_out); + return trans_send_cmd(&priv->trans, &cmd_out); } void iwl_init_sensitivity(struct iwl_priv *priv) @@ -837,6 +840,65 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig, active_chains); } +static void iwlagn_gain_computation(struct iwl_priv *priv, + u32 average_noise[NUM_RX_CHAINS], + u16 min_average_noise_antenna_i, + u32 min_average_noise, + u8 default_chain) +{ + int i; + s32 delta_g; + struct iwl_chain_noise_data *data = &priv->chain_noise_data; + + /* + * Find Gain Code for the chains based on "default chain" + */ + for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) { + if ((data->disconn_array[i])) { + data->delta_gain_code[i] = 0; + continue; + } + + delta_g = (priv->cfg->base_params->chain_noise_scale * + ((s32)average_noise[default_chain] - + (s32)average_noise[i])) / 1500; + + /* bound gain by 2 bits value max, 3rd bit is sign */ + data->delta_gain_code[i] = + min(abs(delta_g), + (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE); + + if (delta_g < 0) + /* + * set negative sign ... + * note to Intel developers: This is uCode API format, + * not the format of any internal device registers. + * Do not change this format for e.g. 6050 or similar + * devices. Change format only if more resolution + * (i.e. more than 2 bits magnitude) is needed. + */ + data->delta_gain_code[i] |= (1 << 2); + } + + IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d ANT_C = %d\n", + data->delta_gain_code[1], data->delta_gain_code[2]); + + if (!data->radio_write) { + struct iwl_calib_chain_noise_gain_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + + iwl_set_calib_hdr(&cmd.hdr, + priv->phy_calib_chain_noise_gain_cmd); + cmd.delta_gain_1 = data->delta_gain_code[1]; + cmd.delta_gain_2 = data->delta_gain_code[2]; + trans_send_cmd_pdu(&priv->trans, REPLY_PHY_CALIBRATION_CMD, + CMD_ASYNC, sizeof(cmd), &cmd); + + data->radio_write = 1; + data->state = IWL_CHAIN_NOISE_CALIBRATED; + } +} /* * Accumulate 16 beacons of signal and noise statistics for each of @@ -991,16 +1053,14 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv) IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n", min_average_noise, min_average_noise_antenna_i); - if (priv->cfg->ops->utils->gain_computation) - priv->cfg->ops->utils->gain_computation(priv, average_noise, + iwlagn_gain_computation(priv, average_noise, min_average_noise_antenna_i, min_average_noise, find_first_chain(priv->cfg->valid_rx_ant)); /* Some power changes may have been made during the calibration. 
* Update and commit the RXON */ - if (priv->cfg->ops->lib->update_chain_flags) - priv->cfg->ops->lib->update_chain_flags(priv); + iwl_update_chain_flags(priv); data->state = IWL_CHAIN_NOISE_DONE; iwl_power_update_mode(priv, false); diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h index 4ef4dd93425..a869fc9205d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h +++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h @@ -71,13 +71,6 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv); void iwl_init_sensitivity(struct iwl_priv *priv); void iwl_reset_run_time_calib(struct iwl_priv *priv); -static inline void iwl_chain_noise_reset(struct iwl_priv *priv) -{ - - if (!priv->disable_chain_noise_cal && - priv->cfg->ops->utils->chain_noise_reset) - priv->cfg->ops->utils->chain_noise_reset(priv); -} int iwl_send_calib_results(struct iwl_priv *priv); int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len); diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c index 2ef9448b1c2..b8347db850e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c @@ -108,18 +108,16 @@ err: int iwl_eeprom_check_sku(struct iwl_priv *priv) { - u16 eeprom_sku; u16 radio_cfg; - eeprom_sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP); - if (!priv->cfg->sku) { /* not using sku overwrite */ - priv->cfg->sku = - ((eeprom_sku & EEPROM_SKU_CAP_BAND_SELECTION) >> - EEPROM_SKU_CAP_BAND_POS); - if (eeprom_sku & EEPROM_SKU_CAP_11N_ENABLE) - priv->cfg->sku |= IWL_SKU_N; + priv->cfg->sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP); + if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE && + !priv->cfg->ht_params) { + IWL_ERR(priv, "Invalid 11n configuration\n"); + return -EINVAL; + } } if (!priv->cfg->sku) { IWL_ERR(priv, "Invalid device sku\n"); @@ -152,7 +150,7 @@ int iwl_eeprom_check_sku(struct iwl_priv *priv) void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac) { - const u8 *addr = priv->cfg->ops->lib->eeprom_ops.query_addr(priv, + const u8 *addr = iwl_eeprom_query_addr(priv, EEPROM_MAC_ADDRESS); memcpy(mac, addr, ETH_ALEN); } @@ -247,10 +245,10 @@ void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv) BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8); /* the length is in 16-bit words, but we want entries */ - txp_len = (__le16 *) iwlagn_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS); + txp_len = (__le16 *) iwl_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS); entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN; - txp_array = (void *) iwlagn_eeprom_query_addr(priv, EEPROM_TXP_OFFS); + txp_array = (void *) iwl_eeprom_query_addr(priv, EEPROM_TXP_OFFS); for (idx = 0; idx < entries; idx++) { txp = &txp_array[idx]; diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c deleted file mode 100644 index 23fa93deae9..00000000000 --- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c +++ /dev/null @@ -1,328 +0,0 @@ -/****************************************************************************** - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/sched.h> - -#include "iwl-dev.h" -#include "iwl-core.h" -#include "iwl-io.h" -#include "iwl-agn.h" - -int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant) -{ - struct iwl_tx_ant_config_cmd tx_ant_cmd = { - .valid = cpu_to_le32(valid_tx_ant), - }; - - if (IWL_UCODE_API(priv->ucode_ver) > 1) { - IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant); - return iwl_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD, - sizeof(struct iwl_tx_ant_config_cmd), - &tx_ant_cmd); - } else { - IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n"); - return -EOPNOTSUPP; - } -} - -static u16 iwlagn_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data) -{ - u16 size = (u16)sizeof(struct iwl_addsta_cmd); - struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data; - memcpy(addsta, cmd, size); - /* resrved in 5000 */ - addsta->rate_n_flags = cpu_to_le16(0); - return size; -} - -static void iwlagn_gain_computation(struct iwl_priv *priv, - u32 average_noise[NUM_RX_CHAINS], - u16 min_average_noise_antenna_i, - u32 min_average_noise, - u8 default_chain) -{ - int i; - s32 delta_g; - struct iwl_chain_noise_data *data = &priv->chain_noise_data; - - /* - * Find Gain Code for the chains based on "default chain" - */ - for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) { - if ((data->disconn_array[i])) { - data->delta_gain_code[i] = 0; - continue; - } - - delta_g = (priv->cfg->base_params->chain_noise_scale * - ((s32)average_noise[default_chain] - - (s32)average_noise[i])) / 1500; - - /* bound gain by 2 bits value max, 3rd bit is sign */ - data->delta_gain_code[i] = - min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE); - - if (delta_g < 0) - /* - * set negative sign ... - * note to Intel developers: This is uCode API format, - * not the format of any internal device registers. - * Do not change this format for e.g. 6050 or similar - * devices. Change format only if more resolution - * (i.e. more than 2 bits magnitude) is needed. 
- */ - data->delta_gain_code[i] |= (1 << 2); - } - - IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d ANT_C = %d\n", - data->delta_gain_code[1], data->delta_gain_code[2]); - - if (!data->radio_write) { - struct iwl_calib_chain_noise_gain_cmd cmd; - - memset(&cmd, 0, sizeof(cmd)); - - cmd.hdr.op_code = priv->_agn.phy_calib_chain_noise_gain_cmd; - cmd.hdr.first_group = 0; - cmd.hdr.groups_num = 1; - cmd.hdr.data_valid = 1; - cmd.delta_gain_1 = data->delta_gain_code[1]; - cmd.delta_gain_2 = data->delta_gain_code[2]; - iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD, - sizeof(cmd), &cmd, NULL); - - data->radio_write = 1; - data->state = IWL_CHAIN_NOISE_CALIBRATED; - } -} - -static void iwlagn_chain_noise_reset(struct iwl_priv *priv) -{ - struct iwl_chain_noise_data *data = &priv->chain_noise_data; - int ret; - - if ((data->state == IWL_CHAIN_NOISE_ALIVE) && - iwl_is_any_associated(priv)) { - struct iwl_calib_chain_noise_reset_cmd cmd; - - /* clear data for chain noise calibration algorithm */ - data->chain_noise_a = 0; - data->chain_noise_b = 0; - data->chain_noise_c = 0; - data->chain_signal_a = 0; - data->chain_signal_b = 0; - data->chain_signal_c = 0; - data->beacon_count = 0; - - memset(&cmd, 0, sizeof(cmd)); - cmd.hdr.op_code = priv->_agn.phy_calib_chain_noise_reset_cmd; - cmd.hdr.first_group = 0; - cmd.hdr.groups_num = 1; - cmd.hdr.data_valid = 1; - ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, - sizeof(cmd), &cmd); - if (ret) - IWL_ERR(priv, - "Could not send REPLY_PHY_CALIBRATION_CMD\n"); - data->state = IWL_CHAIN_NOISE_ACCUMULATE; - IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n"); - } -} - -static void iwlagn_tx_cmd_protection(struct iwl_priv *priv, - struct ieee80211_tx_info *info, - __le16 fc, __le32 *tx_flags) -{ - if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS || - info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT || - info->flags & IEEE80211_TX_CTL_AMPDU) - *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK; -} - -/* Calc max signal level (dBm) among 3 possible receivers */ -static int iwlagn_calc_rssi(struct iwl_priv *priv, - struct iwl_rx_phy_res *rx_resp) -{ - /* data from PHY/DSP regarding signal strength, etc., - * contents are always there, not configurable by host - */ - struct iwlagn_non_cfg_phy *ncphy = - (struct iwlagn_non_cfg_phy *)rx_resp->non_cfg_phy_buf; - u32 val, rssi_a, rssi_b, rssi_c, max_rssi; - u8 agc; - - val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_AGC_IDX]); - agc = (val & IWLAGN_OFDM_AGC_MSK) >> IWLAGN_OFDM_AGC_BIT_POS; - - /* Find max rssi among 3 possible receivers. - * These values are measured by the digital signal processor (DSP). - * They should stay fairly constant even as the signal strength varies, - * if the radio's automatic gain control (AGC) is working right. - * AGC value (see below) will provide the "interesting" info. - */ - val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_AB_IDX]); - rssi_a = (val & IWLAGN_OFDM_RSSI_INBAND_A_BITMSK) >> - IWLAGN_OFDM_RSSI_A_BIT_POS; - rssi_b = (val & IWLAGN_OFDM_RSSI_INBAND_B_BITMSK) >> - IWLAGN_OFDM_RSSI_B_BIT_POS; - val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_C_IDX]); - rssi_c = (val & IWLAGN_OFDM_RSSI_INBAND_C_BITMSK) >> - IWLAGN_OFDM_RSSI_C_BIT_POS; - - max_rssi = max_t(u32, rssi_a, rssi_b); - max_rssi = max_t(u32, max_rssi, rssi_c); - - IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n", - rssi_a, rssi_b, rssi_c, max_rssi, agc); - - /* dBm = max_rssi dB - agc dB - constant. 
- * Higher AGC (higher radio gain) means lower signal. */ - return max_rssi - agc - IWLAGN_RSSI_OFFSET; -} - -static int iwlagn_set_pan_params(struct iwl_priv *priv) -{ - struct iwl_wipan_params_cmd cmd; - struct iwl_rxon_context *ctx_bss, *ctx_pan; - int slot0 = 300, slot1 = 0; - int ret; - - if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS)) - return 0; - - BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); - - lockdep_assert_held(&priv->mutex); - - ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS]; - ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN]; - - /* - * If the PAN context is inactive, then we don't need - * to update the PAN parameters, the last thing we'll - * have done before it goes inactive is making the PAN - * parameters be WLAN-only. - */ - if (!ctx_pan->is_active) - return 0; - - memset(&cmd, 0, sizeof(cmd)); - - /* only 2 slots are currently allowed */ - cmd.num_slots = 2; - - cmd.slots[0].type = 0; /* BSS */ - cmd.slots[1].type = 1; /* PAN */ - - if (priv->_agn.hw_roc_channel) { - /* both contexts must be used for this to happen */ - slot1 = priv->_agn.hw_roc_duration; - slot0 = IWL_MIN_SLOT_TIME; - } else if (ctx_bss->vif && ctx_pan->vif) { - int bcnint = ctx_pan->vif->bss_conf.beacon_int; - int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1; - - /* should be set, but seems unused?? */ - cmd.flags |= cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE); - - if (ctx_pan->vif->type == NL80211_IFTYPE_AP && - bcnint && - bcnint != ctx_bss->vif->bss_conf.beacon_int) { - IWL_ERR(priv, - "beacon intervals don't match (%d, %d)\n", - ctx_bss->vif->bss_conf.beacon_int, - ctx_pan->vif->bss_conf.beacon_int); - } else - bcnint = max_t(int, bcnint, - ctx_bss->vif->bss_conf.beacon_int); - if (!bcnint) - bcnint = DEFAULT_BEACON_INTERVAL; - slot0 = bcnint / 2; - slot1 = bcnint - slot0; - - if (test_bit(STATUS_SCAN_HW, &priv->status) || - (!ctx_bss->vif->bss_conf.idle && - !ctx_bss->vif->bss_conf.assoc)) { - slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME; - slot1 = IWL_MIN_SLOT_TIME; - } else if (!ctx_pan->vif->bss_conf.idle && - !ctx_pan->vif->bss_conf.assoc) { - slot1 = bcnint * 3 - IWL_MIN_SLOT_TIME; - slot0 = IWL_MIN_SLOT_TIME; - } - } else if (ctx_pan->vif) { - slot0 = 0; - slot1 = max_t(int, 1, ctx_pan->vif->bss_conf.dtim_period) * - ctx_pan->vif->bss_conf.beacon_int; - slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1); - - if (test_bit(STATUS_SCAN_HW, &priv->status)) { - slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME; - slot1 = IWL_MIN_SLOT_TIME; - } - } - - cmd.slots[0].width = cpu_to_le16(slot0); - cmd.slots[1].width = cpu_to_le16(slot1); - - ret = iwl_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, sizeof(cmd), &cmd); - if (ret) - IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret); - - return ret; -} - -struct iwl_hcmd_ops iwlagn_hcmd = { - .commit_rxon = iwlagn_commit_rxon, - .set_rxon_chain = iwlagn_set_rxon_chain, - .set_tx_ant = iwlagn_send_tx_ant_config, - .send_bt_config = iwl_send_bt_config, - .set_pan_params = iwlagn_set_pan_params, -}; - -struct iwl_hcmd_ops iwlagn_bt_hcmd = { - .commit_rxon = iwlagn_commit_rxon, - .set_rxon_chain = iwlagn_set_rxon_chain, - .set_tx_ant = iwlagn_send_tx_ant_config, - .send_bt_config = iwlagn_send_advance_bt_config, - .set_pan_params = iwlagn_set_pan_params, -}; - -struct iwl_hcmd_utils_ops iwlagn_hcmd_utils = { - .build_addsta_hcmd = iwlagn_build_addsta_hcmd, - .gain_computation = iwlagn_gain_computation, - .chain_noise_reset = iwlagn_chain_noise_reset, - .tx_cmd_protection = iwlagn_tx_cmd_protection, - .calc_rssi = iwlagn_calc_rssi, - .request_scan = iwlagn_request_scan, - 
.post_scan = iwlagn_post_scan, -}; diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h index 7bd19f4e66d..0e5b842529c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h +++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h @@ -81,13 +81,6 @@ /* RSSI to dBm */ #define IWLAGN_RSSI_OFFSET 44 -/* PCI registers */ -#define PCI_CFG_RETRY_TIMEOUT 0x041 - -/* PCI register values */ -#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01 -#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02 - #define IWLAGN_DEFAULT_TX_RETRY 15 /* Limit range of txpower output target to be between these values */ diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c deleted file mode 100644 index 0d5fda44c3a..00000000000 --- a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c +++ /dev/null @@ -1,306 +0,0 @@ -/****************************************************************************** - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - *****************************************************************************/ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/etherdevice.h> -#include <linux/sched.h> -#include <linux/gfp.h> -#include <net/mac80211.h> - -#include "iwl-dev.h" -#include "iwl-core.h" -#include "iwl-agn.h" -#include "iwl-helpers.h" - -#define ICT_COUNT (PAGE_SIZE/sizeof(u32)) - -/* Free dram table */ -void iwl_free_isr_ict(struct iwl_priv *priv) -{ - if (priv->_agn.ict_tbl_vir) { - dma_free_coherent(&priv->pci_dev->dev, - (sizeof(u32) * ICT_COUNT) + PAGE_SIZE, - priv->_agn.ict_tbl_vir, - priv->_agn.ict_tbl_dma); - priv->_agn.ict_tbl_vir = NULL; - } -} - - -/* allocate dram shared table it is a PAGE_SIZE aligned - * also reset all data related to ICT table interrupt. 
- */ -int iwl_alloc_isr_ict(struct iwl_priv *priv) -{ - - /* allocate shrared data table */ - priv->_agn.ict_tbl_vir = - dma_alloc_coherent(&priv->pci_dev->dev, - (sizeof(u32) * ICT_COUNT) + PAGE_SIZE, - &priv->_agn.ict_tbl_dma, GFP_KERNEL); - if (!priv->_agn.ict_tbl_vir) - return -ENOMEM; - - /* align table to PAGE_SIZE boundary */ - priv->_agn.aligned_ict_tbl_dma = ALIGN(priv->_agn.ict_tbl_dma, PAGE_SIZE); - - IWL_DEBUG_ISR(priv, "ict dma addr %Lx dma aligned %Lx diff %d\n", - (unsigned long long)priv->_agn.ict_tbl_dma, - (unsigned long long)priv->_agn.aligned_ict_tbl_dma, - (int)(priv->_agn.aligned_ict_tbl_dma - priv->_agn.ict_tbl_dma)); - - priv->_agn.ict_tbl = priv->_agn.ict_tbl_vir + - (priv->_agn.aligned_ict_tbl_dma - priv->_agn.ict_tbl_dma); - - IWL_DEBUG_ISR(priv, "ict vir addr %p vir aligned %p diff %d\n", - priv->_agn.ict_tbl, priv->_agn.ict_tbl_vir, - (int)(priv->_agn.aligned_ict_tbl_dma - priv->_agn.ict_tbl_dma)); - - /* reset table and index to all 0 */ - memset(priv->_agn.ict_tbl_vir,0, (sizeof(u32) * ICT_COUNT) + PAGE_SIZE); - priv->_agn.ict_index = 0; - - /* add periodic RX interrupt */ - priv->inta_mask |= CSR_INT_BIT_RX_PERIODIC; - return 0; -} - -/* Device is going up inform it about using ICT interrupt table, - * also we need to tell the driver to start using ICT interrupt. - */ -int iwl_reset_ict(struct iwl_priv *priv) -{ - u32 val; - unsigned long flags; - - if (!priv->_agn.ict_tbl_vir) - return 0; - - spin_lock_irqsave(&priv->lock, flags); - iwl_disable_interrupts(priv); - - memset(&priv->_agn.ict_tbl[0], 0, sizeof(u32) * ICT_COUNT); - - val = priv->_agn.aligned_ict_tbl_dma >> PAGE_SHIFT; - - val |= CSR_DRAM_INT_TBL_ENABLE; - val |= CSR_DRAM_INIT_TBL_WRAP_CHECK; - - IWL_DEBUG_ISR(priv, "CSR_DRAM_INT_TBL_REG =0x%X " - "aligned dma address %Lx\n", - val, (unsigned long long)priv->_agn.aligned_ict_tbl_dma); - - iwl_write32(priv, CSR_DRAM_INT_TBL_REG, val); - priv->_agn.use_ict = true; - priv->_agn.ict_index = 0; - iwl_write32(priv, CSR_INT, priv->inta_mask); - iwl_enable_interrupts(priv); - spin_unlock_irqrestore(&priv->lock, flags); - - return 0; -} - -/* Device is going down disable ict interrupt usage */ -void iwl_disable_ict(struct iwl_priv *priv) -{ - unsigned long flags; - - spin_lock_irqsave(&priv->lock, flags); - priv->_agn.use_ict = false; - spin_unlock_irqrestore(&priv->lock, flags); -} - -static irqreturn_t iwl_isr(int irq, void *data) -{ - struct iwl_priv *priv = data; - u32 inta, inta_mask; - unsigned long flags; -#ifdef CONFIG_IWLWIFI_DEBUG - u32 inta_fh; -#endif - if (!priv) - return IRQ_NONE; - - spin_lock_irqsave(&priv->lock, flags); - - /* Disable (but don't clear!) interrupts here to avoid - * back-to-back ISRs and sporadic interrupts from our NIC. - * If we have something to service, the tasklet will re-enable ints. - * If we *don't* have something, we'll re-enable before leaving here. */ - inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */ - iwl_write32(priv, CSR_INT_MASK, 0x00000000); - - /* Discover which interrupts are active/pending */ - inta = iwl_read32(priv, CSR_INT); - - /* Ignore interrupt if there's nothing in NIC to service. - * This may be due to IRQ shared with another device, - * or due to sporadic interrupts thrown from our NIC. */ - if (!inta) { - IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n"); - goto none; - } - - if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { - /* Hardware disappeared. It might have already raised - * an interrupt */ - IWL_WARN(priv, "HARDWARE GONE?? 
INTA == 0x%08x\n", inta); - goto unplugged; - } - -#ifdef CONFIG_IWLWIFI_DEBUG - if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { - inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); - IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, " - "fh 0x%08x\n", inta, inta_mask, inta_fh); - } -#endif - - priv->_agn.inta |= inta; - /* iwl_irq_tasklet() will service interrupts and re-enable them */ - if (likely(inta)) - tasklet_schedule(&priv->irq_tasklet); - else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta) - iwl_enable_interrupts(priv); - - unplugged: - spin_unlock_irqrestore(&priv->lock, flags); - return IRQ_HANDLED; - - none: - /* re-enable interrupts here since we don't have anything to service. */ - /* only Re-enable if disabled by irq and no schedules tasklet. */ - if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta) - iwl_enable_interrupts(priv); - - spin_unlock_irqrestore(&priv->lock, flags); - return IRQ_NONE; -} - -/* interrupt handler using ict table, with this interrupt driver will - * stop using INTA register to get device's interrupt, reading this register - * is expensive, device will write interrupts in ICT dram table, increment - * index then will fire interrupt to driver, driver will OR all ICT table - * entries from current index up to table entry with 0 value. the result is - * the interrupt we need to service, driver will set the entries back to 0 and - * set index. - */ -irqreturn_t iwl_isr_ict(int irq, void *data) -{ - struct iwl_priv *priv = data; - u32 inta, inta_mask; - u32 val = 0; - unsigned long flags; - - if (!priv) - return IRQ_NONE; - - /* dram interrupt table not set yet, - * use legacy interrupt. - */ - if (!priv->_agn.use_ict) - return iwl_isr(irq, data); - - spin_lock_irqsave(&priv->lock, flags); - - /* Disable (but don't clear!) interrupts here to avoid - * back-to-back ISRs and sporadic interrupts from our NIC. - * If we have something to service, the tasklet will re-enable ints. - * If we *don't* have something, we'll re-enable before leaving here. - */ - inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */ - iwl_write32(priv, CSR_INT_MASK, 0x00000000); - - - /* Ignore interrupt if there's nothing in NIC to service. - * This may be due to IRQ shared with another device, - * or due to sporadic interrupts thrown from our NIC. */ - if (!priv->_agn.ict_tbl[priv->_agn.ict_index]) { - IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n"); - goto none; - } - - /* read all entries that not 0 start with ict_index */ - while (priv->_agn.ict_tbl[priv->_agn.ict_index]) { - - val |= le32_to_cpu(priv->_agn.ict_tbl[priv->_agn.ict_index]); - IWL_DEBUG_ISR(priv, "ICT index %d value 0x%08X\n", - priv->_agn.ict_index, - le32_to_cpu(priv->_agn.ict_tbl[priv->_agn.ict_index])); - priv->_agn.ict_tbl[priv->_agn.ict_index] = 0; - priv->_agn.ict_index = iwl_queue_inc_wrap(priv->_agn.ict_index, - ICT_COUNT); - - } - - /* We should not get this value, just ignore it. */ - if (val == 0xffffffff) - val = 0; - - /* - * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit - * (bit 15 before shifting it to 31) to clear when using interrupt - * coalescing. fortunately, bits 18 and 19 stay set when this happens - * so we use them to decide on the real state of the Rx bit. - * In order words, bit 15 is set if bit 18 or bit 19 are set. 
- */ - if (val & 0xC0000) - val |= 0x8000; - - inta = (0xff & val) | ((0xff00 & val) << 16); - IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n", - inta, inta_mask, val); - - inta &= priv->inta_mask; - priv->_agn.inta |= inta; - - /* iwl_irq_tasklet() will service interrupts and re-enable them */ - if (likely(inta)) - tasklet_schedule(&priv->irq_tasklet); - else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta) { - /* Allow interrupt if was disabled by this handler and - * no tasklet was schedules, We should not enable interrupt, - * tasklet will enable it. - */ - iwl_enable_interrupts(priv); - } - - spin_unlock_irqrestore(&priv->lock, flags); - return IRQ_HANDLED; - - none: - /* re-enable interrupts here since we don't have anything to service. - * only Re-enable if disabled by irq. - */ - if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta) - iwl_enable_interrupts(priv); - - spin_unlock_irqrestore(&priv->lock, flags); - return IRQ_NONE; -} diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c index f803fb62f8b..3bee0f119bc 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c @@ -39,6 +39,7 @@ #include "iwl-agn-hw.h" #include "iwl-agn.h" #include "iwl-sta.h" +#include "iwl-trans.h" static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp) { @@ -52,73 +53,73 @@ static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status) switch (status) { case TX_STATUS_POSTPONE_DELAY: - priv->_agn.reply_tx_stats.pp_delay++; + priv->reply_tx_stats.pp_delay++; break; case TX_STATUS_POSTPONE_FEW_BYTES: - priv->_agn.reply_tx_stats.pp_few_bytes++; + priv->reply_tx_stats.pp_few_bytes++; break; case TX_STATUS_POSTPONE_BT_PRIO: - priv->_agn.reply_tx_stats.pp_bt_prio++; + priv->reply_tx_stats.pp_bt_prio++; break; case TX_STATUS_POSTPONE_QUIET_PERIOD: - priv->_agn.reply_tx_stats.pp_quiet_period++; + priv->reply_tx_stats.pp_quiet_period++; break; case TX_STATUS_POSTPONE_CALC_TTAK: - priv->_agn.reply_tx_stats.pp_calc_ttak++; + priv->reply_tx_stats.pp_calc_ttak++; break; case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY: - priv->_agn.reply_tx_stats.int_crossed_retry++; + priv->reply_tx_stats.int_crossed_retry++; break; case TX_STATUS_FAIL_SHORT_LIMIT: - priv->_agn.reply_tx_stats.short_limit++; + priv->reply_tx_stats.short_limit++; break; case TX_STATUS_FAIL_LONG_LIMIT: - priv->_agn.reply_tx_stats.long_limit++; + priv->reply_tx_stats.long_limit++; break; case TX_STATUS_FAIL_FIFO_UNDERRUN: - priv->_agn.reply_tx_stats.fifo_underrun++; + priv->reply_tx_stats.fifo_underrun++; break; case TX_STATUS_FAIL_DRAIN_FLOW: - priv->_agn.reply_tx_stats.drain_flow++; + priv->reply_tx_stats.drain_flow++; break; case TX_STATUS_FAIL_RFKILL_FLUSH: - priv->_agn.reply_tx_stats.rfkill_flush++; + priv->reply_tx_stats.rfkill_flush++; break; case TX_STATUS_FAIL_LIFE_EXPIRE: - priv->_agn.reply_tx_stats.life_expire++; + priv->reply_tx_stats.life_expire++; break; case TX_STATUS_FAIL_DEST_PS: - priv->_agn.reply_tx_stats.dest_ps++; + priv->reply_tx_stats.dest_ps++; break; case TX_STATUS_FAIL_HOST_ABORTED: - priv->_agn.reply_tx_stats.host_abort++; + priv->reply_tx_stats.host_abort++; break; case TX_STATUS_FAIL_BT_RETRY: - priv->_agn.reply_tx_stats.bt_retry++; + priv->reply_tx_stats.bt_retry++; break; case TX_STATUS_FAIL_STA_INVALID: - priv->_agn.reply_tx_stats.sta_invalid++; + priv->reply_tx_stats.sta_invalid++; break; case TX_STATUS_FAIL_FRAG_DROPPED: - 
priv->_agn.reply_tx_stats.frag_drop++; + priv->reply_tx_stats.frag_drop++; break; case TX_STATUS_FAIL_TID_DISABLE: - priv->_agn.reply_tx_stats.tid_disable++; + priv->reply_tx_stats.tid_disable++; break; case TX_STATUS_FAIL_FIFO_FLUSHED: - priv->_agn.reply_tx_stats.fifo_flush++; + priv->reply_tx_stats.fifo_flush++; break; case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL: - priv->_agn.reply_tx_stats.insuff_cf_poll++; + priv->reply_tx_stats.insuff_cf_poll++; break; case TX_STATUS_FAIL_PASSIVE_NO_RX: - priv->_agn.reply_tx_stats.fail_hw_drop++; + priv->reply_tx_stats.fail_hw_drop++; break; case TX_STATUS_FAIL_NO_BEACON_ON_RADAR: - priv->_agn.reply_tx_stats.sta_color_mismatch++; + priv->reply_tx_stats.sta_color_mismatch++; break; default: - priv->_agn.reply_tx_stats.unknown++; + priv->reply_tx_stats.unknown++; break; } } @@ -129,43 +130,43 @@ static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status) switch (status) { case AGG_TX_STATE_UNDERRUN_MSK: - priv->_agn.reply_agg_tx_stats.underrun++; + priv->reply_agg_tx_stats.underrun++; break; case AGG_TX_STATE_BT_PRIO_MSK: - priv->_agn.reply_agg_tx_stats.bt_prio++; + priv->reply_agg_tx_stats.bt_prio++; break; case AGG_TX_STATE_FEW_BYTES_MSK: - priv->_agn.reply_agg_tx_stats.few_bytes++; + priv->reply_agg_tx_stats.few_bytes++; break; case AGG_TX_STATE_ABORT_MSK: - priv->_agn.reply_agg_tx_stats.abort++; + priv->reply_agg_tx_stats.abort++; break; case AGG_TX_STATE_LAST_SENT_TTL_MSK: - priv->_agn.reply_agg_tx_stats.last_sent_ttl++; + priv->reply_agg_tx_stats.last_sent_ttl++; break; case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK: - priv->_agn.reply_agg_tx_stats.last_sent_try++; + priv->reply_agg_tx_stats.last_sent_try++; break; case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK: - priv->_agn.reply_agg_tx_stats.last_sent_bt_kill++; + priv->reply_agg_tx_stats.last_sent_bt_kill++; break; case AGG_TX_STATE_SCD_QUERY_MSK: - priv->_agn.reply_agg_tx_stats.scd_query++; + priv->reply_agg_tx_stats.scd_query++; break; case AGG_TX_STATE_TEST_BAD_CRC32_MSK: - priv->_agn.reply_agg_tx_stats.bad_crc32++; + priv->reply_agg_tx_stats.bad_crc32++; break; case AGG_TX_STATE_RESPONSE_MSK: - priv->_agn.reply_agg_tx_stats.response++; + priv->reply_agg_tx_stats.response++; break; case AGG_TX_STATE_DUMP_TX_MSK: - priv->_agn.reply_agg_tx_stats.dump_tx++; + priv->reply_agg_tx_stats.dump_tx++; break; case AGG_TX_STATE_DELAY_TX_MSK: - priv->_agn.reply_agg_tx_stats.delay_tx++; + priv->reply_agg_tx_stats.delay_tx++; break; default: - priv->_agn.reply_agg_tx_stats.unknown++; + priv->reply_agg_tx_stats.unknown++; break; } } @@ -390,8 +391,7 @@ void iwl_check_abort_status(struct iwl_priv *priv, } } -static void iwlagn_rx_reply_tx(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) +void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); u16 sequence = le16_to_cpu(pkt->hdr.sequence); @@ -400,6 +400,7 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_tx_queue *txq = &priv->txq[txq_id]; struct ieee80211_tx_info *info; struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; + struct ieee80211_hdr *hdr; struct iwl_tx_info *txb; u32 status = le16_to_cpu(tx_resp->status.status); int tid; @@ -408,9 +409,9 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv, unsigned long flags; if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) { - IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d " - "is out of range [0-%d] %d %d\n", txq_id, - index, txq->q.n_bd, txq->q.write_ptr, + IWL_ERR(priv, "%s: 
Read index for DMA queue txq_id (%d) " + "index %d is out of range [0-%d] %d %d\n", __func__, + txq_id, index, txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr); return; } @@ -426,6 +427,11 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv, IWLAGN_TX_RES_RA_POS; spin_lock_irqsave(&priv->sta_lock, flags); + + hdr = (void *)txb->skb->data; + if (!ieee80211_is_data_qos(hdr->frame_control)) + priv->last_seq_ctl = tx_resp->seq_ctl; + if (txq->sched_retry) { const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp); struct iwl_ht_agg *agg; @@ -438,7 +444,7 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv, if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 && priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist) { - IWL_WARN(priv, "receive reply tx with bt_kill\n"); + IWL_DEBUG_COEX(priv, "receive reply tx with bt_kill\n"); } iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index); @@ -478,27 +484,6 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv, spin_unlock_irqrestore(&priv->sta_lock, flags); } -void iwlagn_rx_handler_setup(struct iwl_priv *priv) -{ - /* init calibration handlers */ - priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] = - iwlagn_rx_calib_result; - priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx; - - /* set up notification wait support */ - spin_lock_init(&priv->_agn.notif_wait_lock); - INIT_LIST_HEAD(&priv->_agn.notif_waits); - init_waitqueue_head(&priv->_agn.notif_waitq); -} - -void iwlagn_setup_deferred_work(struct iwl_priv *priv) -{ - /* - * nothing need to be done here anymore - * still keep for future use if needed - */ -} - int iwlagn_hw_valid_rtc_data_addr(u32 addr) { return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) && @@ -540,8 +525,8 @@ int iwlagn_send_tx_power(struct iwl_priv *priv) else tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD; - return iwl_send_cmd_pdu(priv, tx_ant_cfg_cmd, sizeof(tx_power_cmd), - &tx_power_cmd); + return trans_send_cmd_pdu(&priv->trans, tx_ant_cfg_cmd, CMD_SYNC, + sizeof(tx_power_cmd), &tx_power_cmd); } void iwlagn_temperature(struct iwl_priv *priv) @@ -610,8 +595,7 @@ static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address) return (address & ADDRESS_MSK) + (offset << 1); } -const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv, - size_t offset) +const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset) { u32 address = eeprom_indirect_address(priv, offset); BUG_ON(address >= priv->cfg->base_params->eeprom_size); @@ -622,367 +606,12 @@ struct iwl_mod_params iwlagn_mod_params = { .amsdu_size_8K = 1, .restart_fw = 1, .plcp_check = true, + .bt_coex_active = true, + .no_sleep_autoadjust = true, + .power_level = IWL_POWER_INDEX_1, /* the rest are 0 by default */ }; -void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) -{ - unsigned long flags; - int i; - spin_lock_irqsave(&rxq->lock, flags); - INIT_LIST_HEAD(&rxq->rx_free); - INIT_LIST_HEAD(&rxq->rx_used); - /* Fill the rx_used queue with _all_ of the Rx buffers */ - for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { - /* In the reset function, these buffers may have been allocated - * to an SKB, so we need to unmap and free potential storage */ - if (rxq->pool[i].page != NULL) { - pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, - PAGE_SIZE << priv->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); - __iwl_free_pages(priv, rxq->pool[i].page); - rxq->pool[i].page = NULL; - } - list_add_tail(&rxq->pool[i].list, &rxq->rx_used); - } - - for (i = 0; i < RX_QUEUE_SIZE; i++) - rxq->queue[i] = NULL; - - /* Set 
us so that we have processed and used all buffers, but have - * not restocked the Rx queue with fresh buffers */ - rxq->read = rxq->write = 0; - rxq->write_actual = 0; - rxq->free_count = 0; - spin_unlock_irqrestore(&rxq->lock, flags); -} - -int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) -{ - u32 rb_size; - const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ - u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */ - - rb_timeout = RX_RB_TIMEOUT; - - if (iwlagn_mod_params.amsdu_size_8K) - rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; - else - rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; - - /* Stop Rx DMA */ - iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); - - /* Reset driver's Rx queue write index */ - iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); - - /* Tell device where to find RBD circular buffer in DRAM */ - iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG, - (u32)(rxq->bd_dma >> 8)); - - /* Tell device where in DRAM to update its Rx status */ - iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG, - rxq->rb_stts_dma >> 4); - - /* Enable Rx DMA - * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in - * the credit mechanism in 5000 HW RX FIFO - * Direct rx interrupts to hosts - * Rx buffer size 4 or 8k - * RB timeout 0x10 - * 256 RBDs - */ - iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, - FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | - FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | - FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | - FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK | - rb_size| - (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)| - (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); - - /* Set interrupt coalescing timer to default (2048 usecs) */ - iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); - - return 0; -} - -static void iwlagn_set_pwr_vmain(struct iwl_priv *priv) -{ -/* - * (for documentation purposes) - * to set power to V_AUX, do: - - if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) - iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, - APMG_PS_CTRL_VAL_PWR_SRC_VAUX, - ~APMG_PS_CTRL_MSK_PWR_SRC); - */ - - iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, - APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, - ~APMG_PS_CTRL_MSK_PWR_SRC); -} - -int iwlagn_hw_nic_init(struct iwl_priv *priv) -{ - unsigned long flags; - struct iwl_rx_queue *rxq = &priv->rxq; - int ret; - - /* nic_init */ - spin_lock_irqsave(&priv->lock, flags); - priv->cfg->ops->lib->apm_ops.init(priv); - - /* Set interrupt coalescing calibration timer to default (512 usecs) */ - iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF); - - spin_unlock_irqrestore(&priv->lock, flags); - - iwlagn_set_pwr_vmain(priv); - - priv->cfg->ops->lib->apm_ops.config(priv); - - /* Allocate the RX queue, or reset if it is already allocated */ - if (!rxq->bd) { - ret = iwl_rx_queue_alloc(priv); - if (ret) { - IWL_ERR(priv, "Unable to initialize Rx queue\n"); - return -ENOMEM; - } - } else - iwlagn_rx_queue_reset(priv, rxq); - - iwlagn_rx_replenish(priv); - - iwlagn_rx_init(priv, rxq); - - spin_lock_irqsave(&priv->lock, flags); - - rxq->need_update = 1; - iwl_rx_queue_update_write_ptr(priv, rxq); - - spin_unlock_irqrestore(&priv->lock, flags); - - /* Allocate or reset and init all Tx and Command queues */ - if (!priv->txq) { - ret = iwlagn_txq_ctx_alloc(priv); - if (ret) - return ret; - } else - iwlagn_txq_ctx_reset(priv); - - if (priv->cfg->base_params->shadow_reg_enable) { - /* enable shadow regs in HW */ - iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL, - 
0x800FFFFF); - } - - set_bit(STATUS_INIT, &priv->status); - - return 0; -} - -/** - * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr - */ -static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv, - dma_addr_t dma_addr) -{ - return cpu_to_le32((u32)(dma_addr >> 8)); -} - -/** - * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool - * - * If there are slots in the RX queue that need to be restocked, - * and we have free pre-allocated buffers, fill the ranks as much - * as we can, pulling from rx_free. - * - * This moves the 'write' index forward to catch up with 'processed', and - * also updates the memory address in the firmware to reference the new - * target buffer. - */ -void iwlagn_rx_queue_restock(struct iwl_priv *priv) -{ - struct iwl_rx_queue *rxq = &priv->rxq; - struct list_head *element; - struct iwl_rx_mem_buffer *rxb; - unsigned long flags; - - spin_lock_irqsave(&rxq->lock, flags); - while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) { - /* The overwritten rxb must be a used one */ - rxb = rxq->queue[rxq->write]; - BUG_ON(rxb && rxb->page); - - /* Get next free Rx buffer, remove from free list */ - element = rxq->rx_free.next; - rxb = list_entry(element, struct iwl_rx_mem_buffer, list); - list_del(element); - - /* Point to Rx buffer via next RBD in circular buffer */ - rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv, - rxb->page_dma); - rxq->queue[rxq->write] = rxb; - rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; - rxq->free_count--; - } - spin_unlock_irqrestore(&rxq->lock, flags); - /* If the pre-allocated buffer pool is dropping low, schedule to - * refill it */ - if (rxq->free_count <= RX_LOW_WATERMARK) - queue_work(priv->workqueue, &priv->rx_replenish); - - - /* If we've added more space for the firmware to place data, tell it. - * Increment device's write pointer in multiples of 8. */ - if (rxq->write_actual != (rxq->write & ~0x7)) { - spin_lock_irqsave(&rxq->lock, flags); - rxq->need_update = 1; - spin_unlock_irqrestore(&rxq->lock, flags); - iwl_rx_queue_update_write_ptr(priv, rxq); - } -} - -/** - * iwlagn_rx_replenish - Move all used packet from rx_used to rx_free - * - * When moving to rx_free an SKB is allocated for the slot. - * - * Also restock the Rx queue via iwl_rx_queue_restock. - * This is called as a scheduled work item (except for during initialization) - */ -void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority) -{ - struct iwl_rx_queue *rxq = &priv->rxq; - struct list_head *element; - struct iwl_rx_mem_buffer *rxb; - struct page *page; - unsigned long flags; - gfp_t gfp_mask = priority; - - while (1) { - spin_lock_irqsave(&rxq->lock, flags); - if (list_empty(&rxq->rx_used)) { - spin_unlock_irqrestore(&rxq->lock, flags); - return; - } - spin_unlock_irqrestore(&rxq->lock, flags); - - if (rxq->free_count > RX_LOW_WATERMARK) - gfp_mask |= __GFP_NOWARN; - - if (priv->hw_params.rx_page_order > 0) - gfp_mask |= __GFP_COMP; - - /* Alloc a new receive buffer */ - page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order); - if (!page) { - if (net_ratelimit()) - IWL_DEBUG_INFO(priv, "alloc_pages failed, " - "order: %d\n", - priv->hw_params.rx_page_order); - - if ((rxq->free_count <= RX_LOW_WATERMARK) && - net_ratelimit()) - IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n", - priority == GFP_ATOMIC ? 
"GFP_ATOMIC" : "GFP_KERNEL", - rxq->free_count); - /* We don't reschedule replenish work here -- we will - * call the restock method and if it still needs - * more buffers it will schedule replenish */ - return; - } - - spin_lock_irqsave(&rxq->lock, flags); - - if (list_empty(&rxq->rx_used)) { - spin_unlock_irqrestore(&rxq->lock, flags); - __free_pages(page, priv->hw_params.rx_page_order); - return; - } - element = rxq->rx_used.next; - rxb = list_entry(element, struct iwl_rx_mem_buffer, list); - list_del(element); - - spin_unlock_irqrestore(&rxq->lock, flags); - - BUG_ON(rxb->page); - rxb->page = page; - /* Get physical address of the RB */ - rxb->page_dma = pci_map_page(priv->pci_dev, page, 0, - PAGE_SIZE << priv->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); - /* dma address must be no more than 36 bits */ - BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); - /* and also 256 byte aligned! */ - BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); - - spin_lock_irqsave(&rxq->lock, flags); - - list_add_tail(&rxb->list, &rxq->rx_free); - rxq->free_count++; - - spin_unlock_irqrestore(&rxq->lock, flags); - } -} - -void iwlagn_rx_replenish(struct iwl_priv *priv) -{ - unsigned long flags; - - iwlagn_rx_allocate(priv, GFP_KERNEL); - - spin_lock_irqsave(&priv->lock, flags); - iwlagn_rx_queue_restock(priv); - spin_unlock_irqrestore(&priv->lock, flags); -} - -void iwlagn_rx_replenish_now(struct iwl_priv *priv) -{ - iwlagn_rx_allocate(priv, GFP_ATOMIC); - - iwlagn_rx_queue_restock(priv); -} - -/* Assumes that the skb field of the buffers in 'pool' is kept accurate. - * If an SKB has been detached, the POOL needs to have its SKB set to NULL - * This free routine walks the list of POOL entries and if SKB is set to - * non NULL it is unmapped and freed - */ -void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq) -{ - int i; - for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { - if (rxq->pool[i].page != NULL) { - pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, - PAGE_SIZE << priv->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); - __iwl_free_pages(priv, rxq->pool[i].page); - rxq->pool[i].page = NULL; - } - } - - dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, - rxq->bd_dma); - dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status), - rxq->rb_stts, rxq->rb_stts_dma); - rxq->bd = NULL; - rxq->rb_stts = NULL; -} - -int iwlagn_rxq_stop(struct iwl_priv *priv) -{ - - /* stop Rx DMA */ - iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); - iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG, - FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); - - return 0; -} - int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band) { int idx = 0; @@ -1126,7 +755,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv, static int iwl_fill_offch_tx(struct iwl_priv *priv, void *data, size_t maxlen) { - struct sk_buff *skb = priv->_agn.offchan_tx_skb; + struct sk_buff *skb = priv->offchan_tx_skb; if (skb->len < maxlen) maxlen = skb->len; @@ -1141,6 +770,7 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) struct iwl_host_cmd cmd = { .id = REPLY_SCAN_CMD, .len = { sizeof(struct iwl_scan_cmd), }, + .flags = CMD_SYNC, }; struct iwl_scan_cmd *scan; struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; @@ -1211,7 +841,7 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) } else if (priv->scan_type == IWL_SCAN_OFFCH_TX) { scan->suspend_time = 0; scan->max_out_time = - cpu_to_le32(1024 * 
priv->_agn.offchan_tx_timeout); + cpu_to_le32(1024 * priv->offchan_tx_timeout); } switch (priv->scan_type) { @@ -1399,9 +1029,9 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) scan_ch = (void *)&scan->data[cmd_len]; scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE; scan_ch->channel = - cpu_to_le16(priv->_agn.offchan_tx_chan->hw_value); + cpu_to_le16(priv->offchan_tx_chan->hw_value); scan_ch->active_dwell = - cpu_to_le16(priv->_agn.offchan_tx_timeout); + cpu_to_le16(priv->offchan_tx_timeout); scan_ch->passive_dwell = 0; /* Set txpower levels to defaults */ @@ -1411,7 +1041,7 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) * power level: * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3; */ - if (priv->_agn.offchan_tx_chan->band == IEEE80211_BAND_5GHZ) + if (priv->offchan_tx_chan->band == IEEE80211_BAND_5GHZ) scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; else scan_ch->tx_gain = ((1 << 5) | (5 << 3)); @@ -1433,17 +1063,14 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) /* set scan bit here for PAN params */ set_bit(STATUS_SCAN_HW, &priv->status); - if (priv->cfg->ops->hcmd->set_pan_params) { - ret = priv->cfg->ops->hcmd->set_pan_params(priv); - if (ret) - return ret; - } + ret = iwlagn_set_pan_params(priv); + if (ret) + return ret; - ret = iwl_send_cmd_sync(priv, &cmd); + ret = trans_send_cmd(&priv->trans, &cmd); if (ret) { clear_bit(STATUS_SCAN_HW, &priv->status); - if (priv->cfg->ops->hcmd->set_pan_params) - priv->cfg->ops->hcmd->set_pan_params(priv); + iwlagn_set_pan_params(priv); } return ret; @@ -1528,23 +1155,32 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control) might_sleep(); memset(&flush_cmd, 0, sizeof(flush_cmd)); - flush_cmd.fifo_control = IWL_TX_FIFO_VO_MSK | IWL_TX_FIFO_VI_MSK | - IWL_TX_FIFO_BE_MSK | IWL_TX_FIFO_BK_MSK; - if (priv->cfg->sku & IWL_SKU_N) + if (flush_control & BIT(IWL_RXON_CTX_BSS)) + flush_cmd.fifo_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK | + IWL_SCD_BE_MSK | IWL_SCD_BK_MSK | + IWL_SCD_MGMT_MSK; + if ((flush_control & BIT(IWL_RXON_CTX_PAN)) && + (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))) + flush_cmd.fifo_control |= IWL_PAN_SCD_VO_MSK | + IWL_PAN_SCD_VI_MSK | IWL_PAN_SCD_BE_MSK | + IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK | + IWL_PAN_SCD_MULTICAST_MSK; + + if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE) flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK; IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n", flush_cmd.fifo_control); flush_cmd.flush_control = cpu_to_le16(flush_control); - return iwl_send_cmd(priv, &cmd); + return trans_send_cmd(&priv->trans, &cmd); } void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control) { mutex_lock(&priv->mutex); ieee80211_stop_queues(priv->hw); - if (priv->cfg->ops->lib->txfifo_flush(priv, IWL_DROP_ALL)) { + if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) { IWL_ERR(priv, "flush request fail\n"); goto done; } @@ -1699,18 +1335,21 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv) * (might be in monitor mode), or the interface is in * IBSS mode (no proper uCode support for coex then). 
*/ - if (!bt_coex_active || priv->iw_mode == NL80211_IFTYPE_ADHOC) { + if (!iwlagn_mod_params.bt_coex_active || + priv->iw_mode == NL80211_IFTYPE_ADHOC) { basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED; } else { basic.flags = IWLAGN_BT_FLAG_COEX_MODE_3W << IWLAGN_BT_FLAG_COEX_MODE_SHIFT; - if (priv->cfg->bt_params && - priv->cfg->bt_params->bt_sco_disable) + + if (!priv->bt_enable_pspoll) basic.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE; + else + basic.flags &= ~IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE; if (priv->bt_ch_announce) basic.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION; - IWL_DEBUG_INFO(priv, "BT coex flag: 0X%x\n", basic.flags); + IWL_DEBUG_COEX(priv, "BT coex flag: 0X%x\n", basic.flags); } priv->bt_enable_flag = basic.flags; if (priv->bt_full_concurrent) @@ -1720,7 +1359,7 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv) memcpy(basic.bt3_lookup_table, iwlagn_def_3w_lookup, sizeof(iwlagn_def_3w_lookup)); - IWL_DEBUG_INFO(priv, "BT coex %s in %s mode\n", + IWL_DEBUG_COEX(priv, "BT coex %s in %s mode\n", basic.flags ? "active" : "disabled", priv->bt_full_concurrent ? "full concurrency" : "3-wire"); @@ -1728,19 +1367,97 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv) if (priv->cfg->bt_params->bt_session_2) { memcpy(&bt_cmd_2000.basic, &basic, sizeof(basic)); - ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, - sizeof(bt_cmd_2000), &bt_cmd_2000); + ret = trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG, + CMD_SYNC, sizeof(bt_cmd_2000), &bt_cmd_2000); } else { memcpy(&bt_cmd_6000.basic, &basic, sizeof(basic)); - ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, - sizeof(bt_cmd_6000), &bt_cmd_6000); + ret = trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG, + CMD_SYNC, sizeof(bt_cmd_6000), &bt_cmd_6000); } if (ret) IWL_ERR(priv, "failed to send BT Coex Config\n"); } +void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena) +{ + struct iwl_rxon_context *ctx, *found_ctx = NULL; + bool found_ap = false; + + lockdep_assert_held(&priv->mutex); + + /* Check whether AP or GO mode is active. */ + if (rssi_ena) { + for_each_context(priv, ctx) { + if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_AP && + iwl_is_associated_ctx(ctx)) { + found_ap = true; + break; + } + } + } + + /* + * If disable was received or If GO/AP mode, disable RSSI + * measurements. + */ + if (!rssi_ena || found_ap) { + if (priv->cur_rssi_ctx) { + ctx = priv->cur_rssi_ctx; + ieee80211_disable_rssi_reports(ctx->vif); + priv->cur_rssi_ctx = NULL; + } + return; + } + + /* + * If rssi measurements need to be enabled, consider all cases now. + * Figure out how many contexts are active. + */ + for_each_context(priv, ctx) { + if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION && + iwl_is_associated_ctx(ctx)) { + found_ctx = ctx; + break; + } + } + + /* + * rssi monitor already enabled for the correct interface...nothing + * to do. + */ + if (found_ctx == priv->cur_rssi_ctx) + return; + + /* + * Figure out if rssi monitor is currently enabled, and needs + * to be changed. If rssi monitor is already enabled, disable + * it first else just enable rssi measurements on the + * interface found above. 
+ */ + if (priv->cur_rssi_ctx) { + ctx = priv->cur_rssi_ctx; + if (ctx->vif) + ieee80211_disable_rssi_reports(ctx->vif); + } + + priv->cur_rssi_ctx = found_ctx; + + if (!found_ctx) + return; + + ieee80211_enable_rssi_reports(found_ctx->vif, + IWLAGN_BT_PSP_MIN_RSSI_THRESHOLD, + IWLAGN_BT_PSP_MAX_RSSI_THRESHOLD); +} + +static bool iwlagn_bt_traffic_is_sco(struct iwl_bt_uart_msg *uart_msg) +{ + return BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3 >> + BT_UART_MSG_FRAME3SCOESCO_POS; +} + static void iwlagn_bt_traffic_change_work(struct work_struct *work) { struct iwl_priv *priv = @@ -1758,7 +1475,7 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work) * coex profile notifications. Ignore that since only bad consequence * can be not matching debug print with actual state. */ - IWL_DEBUG_INFO(priv, "BT traffic load changes: %d\n", + IWL_DEBUG_COEX(priv, "BT traffic load changes: %d\n", priv->bt_traffic_load); switch (priv->bt_traffic_load) { @@ -1793,23 +1510,43 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work) if (test_bit(STATUS_SCAN_HW, &priv->status)) goto out; - if (priv->cfg->ops->lib->update_chain_flags) - priv->cfg->ops->lib->update_chain_flags(priv); + iwl_update_chain_flags(priv); if (smps_request != -1) { + priv->current_ht_config.smps = smps_request; for_each_context(priv, ctx) { if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION) ieee80211_request_smps(ctx->vif, smps_request); } } + + /* + * Dynamic PS poll related functionality. Adjust RSSI measurements if + * necessary. + */ + iwlagn_bt_coex_rssi_monitor(priv); out: mutex_unlock(&priv->mutex); } +/* + * If BT sco traffic, and RSSI monitor is enabled, move measurements to the + * correct interface or disable it if this is the last interface to be + * removed. 
+ */ +void iwlagn_bt_coex_rssi_monitor(struct iwl_priv *priv) +{ + if (priv->bt_is_sco && + priv->bt_traffic_load == IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS) + iwlagn_bt_adjust_rssi_monitor(priv, true); + else + iwlagn_bt_adjust_rssi_monitor(priv, false); +} + static void iwlagn_print_uartmsg(struct iwl_priv *priv, struct iwl_bt_uart_msg *uart_msg) { - IWL_DEBUG_NOTIF(priv, "Message Type = 0x%X, SSN = 0x%X, " + IWL_DEBUG_COEX(priv, "Message Type = 0x%X, SSN = 0x%X, " "Update Req = 0x%X", (BT_UART_MSG_FRAME1MSGTYPE_MSK & uart_msg->frame1) >> BT_UART_MSG_FRAME1MSGTYPE_POS, @@ -1818,7 +1555,7 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv, (BT_UART_MSG_FRAME1UPDATEREQ_MSK & uart_msg->frame1) >> BT_UART_MSG_FRAME1UPDATEREQ_POS); - IWL_DEBUG_NOTIF(priv, "Open connections = 0x%X, Traffic load = 0x%X, " + IWL_DEBUG_COEX(priv, "Open connections = 0x%X, Traffic load = 0x%X, " "Chl_SeqN = 0x%X, In band = 0x%X", (BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK & uart_msg->frame2) >> BT_UART_MSG_FRAME2OPENCONNECTIONS_POS, @@ -1829,7 +1566,7 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv, (BT_UART_MSG_FRAME2INBAND_MSK & uart_msg->frame2) >> BT_UART_MSG_FRAME2INBAND_POS); - IWL_DEBUG_NOTIF(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, " + IWL_DEBUG_COEX(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, " "ACL = 0x%X, Master = 0x%X, OBEX = 0x%X", (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >> BT_UART_MSG_FRAME3SCOESCO_POS, @@ -1844,11 +1581,11 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv, (BT_UART_MSG_FRAME3OBEX_MSK & uart_msg->frame3) >> BT_UART_MSG_FRAME3OBEX_POS); - IWL_DEBUG_NOTIF(priv, "Idle duration = 0x%X", + IWL_DEBUG_COEX(priv, "Idle duration = 0x%X", (BT_UART_MSG_FRAME4IDLEDURATION_MSK & uart_msg->frame4) >> BT_UART_MSG_FRAME4IDLEDURATION_POS); - IWL_DEBUG_NOTIF(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, " + IWL_DEBUG_COEX(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, " "eSCO Retransmissions = 0x%X", (BT_UART_MSG_FRAME5TXACTIVITY_MSK & uart_msg->frame5) >> BT_UART_MSG_FRAME5TXACTIVITY_POS, @@ -1857,13 +1594,13 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv, (BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK & uart_msg->frame5) >> BT_UART_MSG_FRAME5ESCORETRANSMIT_POS); - IWL_DEBUG_NOTIF(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X", + IWL_DEBUG_COEX(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X", (BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK & uart_msg->frame6) >> BT_UART_MSG_FRAME6SNIFFINTERVAL_POS, (BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >> BT_UART_MSG_FRAME6DISCOVERABLE_POS); - IWL_DEBUG_NOTIF(priv, "Sniff Activity = 0x%X, Page = " + IWL_DEBUG_COEX(priv, "Sniff Activity = 0x%X, Page = " "0x%X, Inquiry = 0x%X, Connectable = 0x%X", (BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >> BT_UART_MSG_FRAME7SNIFFACTIVITY_POS, @@ -1913,14 +1650,16 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv, return; } - IWL_DEBUG_NOTIF(priv, "BT Coex notification:\n"); - IWL_DEBUG_NOTIF(priv, " status: %d\n", coex->bt_status); - IWL_DEBUG_NOTIF(priv, " traffic load: %d\n", coex->bt_traffic_load); - IWL_DEBUG_NOTIF(priv, " CI compliance: %d\n", + IWL_DEBUG_COEX(priv, "BT Coex notification:\n"); + IWL_DEBUG_COEX(priv, " status: %d\n", coex->bt_status); + IWL_DEBUG_COEX(priv, " traffic load: %d\n", coex->bt_traffic_load); + IWL_DEBUG_COEX(priv, " CI compliance: %d\n", coex->bt_ci_compliance); iwlagn_print_uartmsg(priv, uart_msg); priv->last_bt_traffic_load = priv->bt_traffic_load; + priv->bt_is_sco = iwlagn_bt_traffic_is_sco(uart_msg); + if 
(priv->iw_mode != NL80211_IFTYPE_ADHOC) { if (priv->bt_status != coex->bt_status || priv->last_bt_traffic_load != coex->bt_traffic_load) { @@ -1954,15 +1693,12 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv, void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv) { - iwlagn_rx_handler_setup(priv); priv->rx_handlers[REPLY_BT_COEX_PROFILE_NOTIF] = iwlagn_bt_coex_profile_notif; } void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv) { - iwlagn_setup_deferred_work(priv); - INIT_WORK(&priv->bt_traffic_change_work, iwlagn_bt_traffic_change_work); } @@ -2274,9 +2010,9 @@ void iwlagn_init_notification_wait(struct iwl_priv *priv, wait_entry->triggered = false; wait_entry->aborted = false; - spin_lock_bh(&priv->_agn.notif_wait_lock); - list_add(&wait_entry->list, &priv->_agn.notif_waits); - spin_unlock_bh(&priv->_agn.notif_wait_lock); + spin_lock_bh(&priv->notif_wait_lock); + list_add(&wait_entry->list, &priv->notif_waits); + spin_unlock_bh(&priv->notif_wait_lock); } int iwlagn_wait_notification(struct iwl_priv *priv, @@ -2285,13 +2021,13 @@ int iwlagn_wait_notification(struct iwl_priv *priv, { int ret; - ret = wait_event_timeout(priv->_agn.notif_waitq, + ret = wait_event_timeout(priv->notif_waitq, wait_entry->triggered || wait_entry->aborted, timeout); - spin_lock_bh(&priv->_agn.notif_wait_lock); + spin_lock_bh(&priv->notif_wait_lock); list_del(&wait_entry->list); - spin_unlock_bh(&priv->_agn.notif_wait_lock); + spin_unlock_bh(&priv->notif_wait_lock); if (wait_entry->aborted) return -EIO; @@ -2305,91 +2041,7 @@ int iwlagn_wait_notification(struct iwl_priv *priv, void iwlagn_remove_notification(struct iwl_priv *priv, struct iwl_notification_wait *wait_entry) { - spin_lock_bh(&priv->_agn.notif_wait_lock); + spin_lock_bh(&priv->notif_wait_lock); list_del(&wait_entry->list); - spin_unlock_bh(&priv->_agn.notif_wait_lock); -} - -int iwlagn_start_device(struct iwl_priv *priv) -{ - int ret; - - if (iwl_prepare_card_hw(priv)) { - IWL_WARN(priv, "Exit HW not ready\n"); - return -EIO; - } - - /* If platform's RF_KILL switch is NOT set to KILL */ - if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) - clear_bit(STATUS_RF_KILL_HW, &priv->status); - else - set_bit(STATUS_RF_KILL_HW, &priv->status); - - if (iwl_is_rfkill(priv)) { - wiphy_rfkill_set_hw_state(priv->hw->wiphy, true); - iwl_enable_interrupts(priv); - return -ERFKILL; - } - - iwl_write32(priv, CSR_INT, 0xFFFFFFFF); - - ret = iwlagn_hw_nic_init(priv); - if (ret) { - IWL_ERR(priv, "Unable to init nic\n"); - return ret; - } - - /* make sure rfkill handshake bits are cleared */ - iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); - iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, - CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); - - /* clear (again), then enable host interrupts */ - iwl_write32(priv, CSR_INT, 0xFFFFFFFF); - iwl_enable_interrupts(priv); - - /* really make sure rfkill handshake bits are cleared */ - iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); - iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); - - return 0; -} - -void iwlagn_stop_device(struct iwl_priv *priv) -{ - unsigned long flags; - - /* stop and reset the on-board processor */ - iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); - - /* tell the device to stop sending interrupts */ - spin_lock_irqsave(&priv->lock, flags); - iwl_disable_interrupts(priv); - spin_unlock_irqrestore(&priv->lock, flags); - iwl_synchronize_irq(priv); - - /* device going down, Stop using ICT table */ - iwl_disable_ict(priv); - - 
/* - * If a HW restart happens during firmware loading, - * then the firmware loading might call this function - * and later it might be called again due to the - * restart. So don't process again if the device is - * already dead. - */ - if (test_bit(STATUS_DEVICE_ENABLED, &priv->status)) { - iwlagn_txq_ctx_stop(priv); - iwlagn_rxq_stop(priv); - - /* Power-down device's busmaster DMA clocks */ - iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); - udelay(5); - } - - /* Make sure (redundant) we've released our request to stay awake */ - iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - - /* Stop the device, and put it in low power state */ - iwl_apm_stop(priv); + spin_unlock_bh(&priv->notif_wait_lock); } diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c index 592b0cfcf71..3789ff4bf53 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c @@ -336,6 +336,12 @@ static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data, } #ifdef CONFIG_MAC80211_DEBUGFS +/** + * Program the device to use fixed rate for frame transmit + * This is for debugging/testing only + * once the device start use fixed rate, we need to reload the module + * to being back the normal operation. + */ static void rs_program_fix_rate(struct iwl_priv *priv, struct iwl_lq_sta *lq_sta) { @@ -348,13 +354,17 @@ static void rs_program_fix_rate(struct iwl_priv *priv, lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ - lq_sta->dbg_fixed_rate = priv->dbg_fixed_rate; +#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL + /* testmode has higher priority to overwirte the fixed rate */ + if (priv->tm_fixed_rate) + lq_sta->dbg_fixed_rate = priv->tm_fixed_rate; +#endif IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n", - lq_sta->lq.sta_id, priv->dbg_fixed_rate); + lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate); - if (priv->dbg_fixed_rate) { - rs_fill_link_cmd(NULL, lq_sta, priv->dbg_fixed_rate); + if (lq_sta->dbg_fixed_rate) { + rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate); iwl_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC, false); } @@ -426,7 +436,7 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv, ieee80211_stop_tx_ba_session(sta, tid); } } else { - IWL_ERR(priv, "Aggregation not enabled for tid %d " + IWL_DEBUG_HT(priv, "Aggregation not enabled for tid %d " "because load = %u\n", tid, load); } return ret; @@ -1072,8 +1082,10 @@ done: /* See if there's a better rate or modulation mode to try. 
*/ if (sta && sta->supp_rates[sband->band]) rs_rate_scale_perform(priv, skb, sta, lq_sta); -#ifdef CONFIG_MAC80211_DEBUGFS - if (priv->dbg_fixed_rate != lq_sta->dbg_fixed_rate) + +#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_IWLWIFI_DEVICE_SVTOOL) + if ((priv->tm_fixed_rate) && + (priv->tm_fixed_rate != lq_sta->dbg_fixed_rate)) rs_program_fix_rate(priv, lq_sta); #endif if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist) @@ -2895,8 +2907,9 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i if (sband->band == IEEE80211_BAND_5GHZ) lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; lq_sta->is_agg = 0; - - priv->dbg_fixed_rate = 0; +#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL + priv->tm_fixed_rate = 0; +#endif #ifdef CONFIG_MAC80211_DEBUGFS lq_sta->dbg_fixed_rate = 0; #endif @@ -3095,7 +3108,6 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, IWL_DEBUG_RATE(priv, "Fixed rate ON\n"); } else { lq_sta->dbg_fixed_rate = 0; - priv->dbg_fixed_rate = 0; IWL_ERR(priv, "Invalid antenna selection 0x%X, Valid is 0x%X\n", ant_sel_tx, valid_tx_ant); @@ -3123,9 +3135,9 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file, return -EFAULT; if (sscanf(buf, "%x", &parsed_rate) == 1) - priv->dbg_fixed_rate = lq_sta->dbg_fixed_rate = parsed_rate; + lq_sta->dbg_fixed_rate = parsed_rate; else - priv->dbg_fixed_rate = lq_sta->dbg_fixed_rate = 0; + lq_sta->dbg_fixed_rate = 0; rs_program_fix_rate(priv, lq_sta); @@ -3155,7 +3167,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file, lq_sta->total_failed, lq_sta->total_success, lq_sta->active_legacy_rate); desc += sprintf(buff+desc, "fixed rate 0x%X\n", - priv->dbg_fixed_rate); + lq_sta->dbg_fixed_rate); desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n", (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "", (priv->hw_params.valid_tx_ant & ANT_B) ? 
"ANT_B," : "", diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c index 09f679d6046..d42ef1763a7 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c @@ -30,6 +30,7 @@ #include "iwl-core.h" #include "iwl-agn-calib.h" #include "iwl-helpers.h" +#include "iwl-trans.h" static int iwlagn_disable_bss(struct iwl_priv *priv, struct iwl_rxon_context *ctx, @@ -39,7 +40,8 @@ static int iwlagn_disable_bss(struct iwl_priv *priv, int ret; send->filter_flags &= ~RXON_FILTER_ASSOC_MSK; - ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, sizeof(*send), send); + ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd, + CMD_SYNC, sizeof(*send), send); send->filter_flags = old_filter; @@ -64,7 +66,8 @@ static int iwlagn_disable_pan(struct iwl_priv *priv, send->filter_flags &= ~RXON_FILTER_ASSOC_MSK; send->dev_type = RXON_DEV_TYPE_P2P; - ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, sizeof(*send), send); + ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd, + CMD_SYNC, sizeof(*send), send); send->filter_flags = old_filter; send->dev_type = old_dev_type; @@ -81,6 +84,22 @@ static int iwlagn_disable_pan(struct iwl_priv *priv, return ret; } +static int iwlagn_disconn_pan(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct iwl_rxon_cmd *send) +{ + __le32 old_filter = send->filter_flags; + int ret; + + send->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd, CMD_SYNC, + sizeof(*send), send); + + send->filter_flags = old_filter; + + return ret; +} + static void iwlagn_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx) { @@ -102,7 +121,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv, ctx->qos_data.qos_active, ctx->qos_data.def_qos_parm.qos_flags); - ret = iwl_send_cmd_pdu(priv, ctx->qos_cmd, + ret = trans_send_cmd_pdu(&priv->trans, ctx->qos_cmd, CMD_SYNC, sizeof(struct iwl_qosparam_cmd), &ctx->qos_data.def_qos_parm); if (ret) @@ -161,11 +180,8 @@ static int iwlagn_send_rxon_assoc(struct iwl_priv *priv, ctx->staging.ofdm_ht_triple_stream_basic_rates; rxon_assoc.acquisition_data = ctx->staging.acquisition_data; - ret = iwl_send_cmd_pdu_async(priv, ctx->rxon_assoc_cmd, - sizeof(rxon_assoc), &rxon_assoc, NULL); - if (ret) - return ret; - + ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_assoc_cmd, + CMD_ASYNC, sizeof(rxon_assoc), &rxon_assoc); return ret; } @@ -175,10 +191,21 @@ static int iwlagn_rxon_disconn(struct iwl_priv *priv, int ret; struct iwl_rxon_cmd *active = (void *)&ctx->active; - if (ctx->ctxid == IWL_RXON_CTX_BSS) + if (ctx->ctxid == IWL_RXON_CTX_BSS) { ret = iwlagn_disable_bss(priv, ctx, &ctx->staging); - else + } else { ret = iwlagn_disable_pan(priv, ctx, &ctx->staging); + if (ret) + return ret; + if (ctx->vif) { + ret = iwl_send_rxon_timing(priv, ctx); + if (ret) { + IWL_ERR(priv, "Failed to send timing (%d)!\n", ret); + return ret; + } + ret = iwlagn_disconn_pan(priv, ctx, &ctx->staging); + } + } if (ret) return ret; @@ -187,6 +214,8 @@ static int iwlagn_rxon_disconn(struct iwl_priv *priv, * keys, so we have to restore those afterwards. 
*/ iwl_clear_ucode_stations(priv, ctx); + /* update -- might need P2P now */ + iwl_update_bcast_station(priv, ctx); iwl_restore_stations(priv, ctx); ret = iwl_restore_default_wep_keys(priv, ctx); if (ret) { @@ -205,10 +234,12 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv, struct iwl_rxon_cmd *active = (void *)&ctx->active; /* RXON timing must be before associated RXON */ - ret = iwl_send_rxon_timing(priv, ctx); - if (ret) { - IWL_ERR(priv, "Failed to send timing (%d)!\n", ret); - return ret; + if (ctx->ctxid == IWL_RXON_CTX_BSS) { + ret = iwl_send_rxon_timing(priv, ctx); + if (ret) { + IWL_ERR(priv, "Failed to send timing (%d)!\n", ret); + return ret; + } } /* QoS info may be cleared by previous un-assoc RXON */ iwlagn_update_qos(priv, ctx); @@ -235,7 +266,7 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv, * Associated RXON doesn't clear the station table in uCode, * so we don't need to restore stations etc. after this. */ - ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, + ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd, CMD_SYNC, sizeof(struct iwl_rxon_cmd), &ctx->staging); if (ret) { IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); @@ -263,9 +294,107 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv, IWL_ERR(priv, "Error sending TX power (%d)\n", ret); return ret; } + + if ((ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION) && + priv->cfg->ht_params->smps_mode) + ieee80211_request_smps(ctx->vif, + priv->cfg->ht_params->smps_mode); + return 0; } +int iwlagn_set_pan_params(struct iwl_priv *priv) +{ + struct iwl_wipan_params_cmd cmd; + struct iwl_rxon_context *ctx_bss, *ctx_pan; + int slot0 = 300, slot1 = 0; + int ret; + + if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS)) + return 0; + + BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); + + lockdep_assert_held(&priv->mutex); + + ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS]; + ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN]; + + /* + * If the PAN context is inactive, then we don't need + * to update the PAN parameters, the last thing we'll + * have done before it goes inactive is making the PAN + * parameters be WLAN-only. + */ + if (!ctx_pan->is_active) + return 0; + + memset(&cmd, 0, sizeof(cmd)); + + /* only 2 slots are currently allowed */ + cmd.num_slots = 2; + + cmd.slots[0].type = 0; /* BSS */ + cmd.slots[1].type = 1; /* PAN */ + + if (priv->hw_roc_channel) { + /* both contexts must be used for this to happen */ + slot1 = priv->hw_roc_duration; + slot0 = IWL_MIN_SLOT_TIME; + } else if (ctx_bss->vif && ctx_pan->vif) { + int bcnint = ctx_pan->beacon_int; + int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1; + + /* should be set, but seems unused?? 
*/ + cmd.flags |= cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE); + + if (ctx_pan->vif->type == NL80211_IFTYPE_AP && + bcnint && + bcnint != ctx_bss->beacon_int) { + IWL_ERR(priv, + "beacon intervals don't match (%d, %d)\n", + ctx_bss->beacon_int, ctx_pan->beacon_int); + } else + bcnint = max_t(int, bcnint, + ctx_bss->beacon_int); + if (!bcnint) + bcnint = DEFAULT_BEACON_INTERVAL; + slot0 = bcnint / 2; + slot1 = bcnint - slot0; + + if (test_bit(STATUS_SCAN_HW, &priv->status) || + (!ctx_bss->vif->bss_conf.idle && + !ctx_bss->vif->bss_conf.assoc)) { + slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME; + slot1 = IWL_MIN_SLOT_TIME; + } else if (!ctx_pan->vif->bss_conf.idle && + !ctx_pan->vif->bss_conf.assoc) { + slot1 = bcnint * 3 - IWL_MIN_SLOT_TIME; + slot0 = IWL_MIN_SLOT_TIME; + } + } else if (ctx_pan->vif) { + slot0 = 0; + slot1 = max_t(int, 1, ctx_pan->vif->bss_conf.dtim_period) * + ctx_pan->beacon_int; + slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1); + + if (test_bit(STATUS_SCAN_HW, &priv->status)) { + slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME; + slot1 = IWL_MIN_SLOT_TIME; + } + } + + cmd.slots[0].width = cpu_to_le16(slot0); + cmd.slots[1].width = cpu_to_le16(slot1); + + ret = trans_send_cmd_pdu(&priv->trans, REPLY_WIPAN_PARAMS, CMD_SYNC, + sizeof(cmd), &cmd); + if (ret) + IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret); + + return ret; +} + /** * iwlagn_commit_rxon - commit staging_rxon to hardware * @@ -308,8 +437,8 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) /* always get timestamp with Rx frame */ ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK; - if (ctx->ctxid == IWL_RXON_CTX_PAN && priv->_agn.hw_roc_channel) { - struct ieee80211_channel *chan = priv->_agn.hw_roc_channel; + if (ctx->ctxid == IWL_RXON_CTX_PAN && priv->hw_roc_channel) { + struct ieee80211_channel *chan = priv->hw_roc_channel; iwl_set_rxon_channel(priv, chan, ctx); iwl_set_flags_for_band(priv, ctx, chan->band, NULL); @@ -375,13 +504,11 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) * do it now if after settings changed. */ iwl_set_tx_power(priv, priv->tx_power_next, false); - return 0; - } - if (priv->cfg->ops->hcmd->set_pan_params) { - ret = priv->cfg->ops->hcmd->set_pan_params(priv); - if (ret) - return ret; + /* make sure we are in the right PS state */ + iwl_power_update_mode(priv, true); + + return 0; } iwl_set_rxon_hwcrypto(priv, ctx, !iwlagn_mod_params.sw_crypto); @@ -405,6 +532,10 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) if (ret) return ret; + ret = iwlagn_set_pan_params(priv); + if (ret) + return ret; + if (new_assoc) return iwlagn_rxon_connect(priv, ctx); @@ -446,9 +577,8 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed) * set up the SM PS mode to OFF if an HT channel is * configured. 
*/ - if (priv->cfg->ops->hcmd->set_rxon_chain) - for_each_context(priv, ctx) - priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); + for_each_context(priv, ctx) + iwlagn_set_rxon_chain(priv, ctx); } if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { @@ -636,6 +766,38 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv, ht_conf->single_chain_sufficient = !need_multiple; } +static void iwlagn_chain_noise_reset(struct iwl_priv *priv) +{ + struct iwl_chain_noise_data *data = &priv->chain_noise_data; + int ret; + + if ((data->state == IWL_CHAIN_NOISE_ALIVE) && + iwl_is_any_associated(priv)) { + struct iwl_calib_chain_noise_reset_cmd cmd; + + /* clear data for chain noise calibration algorithm */ + data->chain_noise_a = 0; + data->chain_noise_b = 0; + data->chain_noise_c = 0; + data->chain_signal_a = 0; + data->chain_signal_b = 0; + data->chain_signal_c = 0; + data->beacon_count = 0; + + memset(&cmd, 0, sizeof(cmd)); + iwl_set_calib_hdr(&cmd.hdr, + priv->phy_calib_chain_noise_reset_cmd); + ret = trans_send_cmd_pdu(&priv->trans, + REPLY_PHY_CALIBRATION_CMD, + CMD_SYNC, sizeof(cmd), &cmd); + if (ret) + IWL_ERR(priv, + "Could not send REPLY_PHY_CALIBRATION_CMD\n"); + data->state = IWL_CHAIN_NOISE_ACCUMULATE; + IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n"); + } +} + void iwlagn_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, @@ -692,7 +854,12 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, iwl_wake_any_queue(priv, ctx); } ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + + if (ctx->ctxid == IWL_RXON_CTX_BSS) + priv->have_rekey_data = false; } + + iwlagn_bt_coex_rssi_monitor(priv); } if (ctx->ht.enabled) { @@ -704,8 +871,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, iwl_set_rxon_ht(priv, &priv->current_ht_config); } - if (priv->cfg->ops->hcmd->set_rxon_chain) - priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); + iwlagn_set_rxon_chain(priv, ctx); if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ)) ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK; @@ -743,7 +909,8 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, iwl_power_update_mode(priv, false); /* Enable RX differential gain and sensitivity calibrations */ - iwl_chain_noise_reset(priv); + if (!priv->disable_chain_noise_cal) + iwlagn_chain_noise_reset(priv); priv->start_calib = 1; } @@ -770,6 +937,13 @@ void iwlagn_post_scan(struct iwl_priv *priv) struct iwl_rxon_context *ctx; /* + * We do not commit power settings while scan is pending, + * do it now if the settings changed. 
+ */ + iwl_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false); + iwl_set_tx_power(priv, priv->tx_power_next, false); + + /* * Since setting the RXON may have been deferred while * performing the scan, fire one off if needed */ @@ -777,6 +951,5 @@ void iwlagn_post_scan(struct iwl_priv *priv) if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) iwlagn_commit_rxon(priv, ctx); - if (priv->cfg->ops->hcmd->set_pan_params) - priv->cfg->ops->hcmd->set_pan_params(priv); + iwlagn_set_pan_params(priv); } diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c index 0bd722cee5a..37e624095e4 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c @@ -33,9 +33,10 @@ #include "iwl-core.h" #include "iwl-sta.h" #include "iwl-agn.h" +#include "iwl-trans.h" static struct iwl_link_quality_cmd * -iwl_sta_alloc_lq(struct iwl_priv *priv, u8 sta_id) +iwl_sta_alloc_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, u8 sta_id) { int i, r; struct iwl_link_quality_cmd *link_cmd; @@ -47,10 +48,15 @@ iwl_sta_alloc_lq(struct iwl_priv *priv, u8 sta_id) IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n"); return NULL; } + + lockdep_assert_held(&priv->mutex); + /* Set up the rate scaling to start at selected rate, fall back * all the way down to 1M in IEEE order, and then spin on 1M */ if (priv->band == IEEE80211_BAND_5GHZ) r = IWL_RATE_6M_INDEX; + else if (ctx && ctx->vif && ctx->vif->p2p) + r = IWL_RATE_6M_INDEX; else r = IWL_RATE_1M_INDEX; @@ -115,7 +121,7 @@ int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx spin_unlock_irqrestore(&priv->sta_lock, flags); /* Set up default rate scaling table in device's station table */ - link_cmd = iwl_sta_alloc_lq(priv, sta_id); + link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id); if (!link_cmd) { IWL_ERR(priv, "Unable to initialize rate scaling for station %pM.\n", addr); @@ -133,6 +139,14 @@ int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx return 0; } +/* + * static WEP keys + * + * For each context, the device has a table of 4 static WEP keys + * (one for each key index) that is updated with the following + * commands. 
+ */ + static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx, bool send_if_empty) @@ -175,7 +189,7 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, cmd.len[0] = cmd_size; if (not_empty || send_if_empty) - return iwl_send_cmd(priv, &cmd); + return trans_send_cmd(&priv->trans, &cmd); else return 0; } @@ -226,9 +240,7 @@ int iwl_set_default_wep_key(struct iwl_priv *priv, return -EINVAL; } - keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV; - keyconf->hw_key_idx = HW_KEY_DEFAULT; - priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher; + keyconf->hw_key_idx = IWLAGN_HW_KEY_DEFAULT; ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen; memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key, @@ -241,166 +253,117 @@ int iwl_set_default_wep_key(struct iwl_priv *priv, return ret; } -static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct ieee80211_key_conf *keyconf, - u8 sta_id) -{ - unsigned long flags; - __le16 key_flags = 0; - struct iwl_addsta_cmd sta_cmd; - - lockdep_assert_held(&priv->mutex); - - keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV; - - key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK); - key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); - key_flags &= ~STA_KEY_FLG_INVALID; - - if (keyconf->keylen == WEP_KEY_LEN_128) - key_flags |= STA_KEY_FLG_KEY_SIZE_MSK; - - if (sta_id == ctx->bcast_sta_id) - key_flags |= STA_KEY_MULTICAST_MSK; - - spin_lock_irqsave(&priv->sta_lock, flags); - - priv->stations[sta_id].keyinfo.cipher = keyconf->cipher; - priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; - priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx; - - memcpy(priv->stations[sta_id].keyinfo.key, - keyconf->key, keyconf->keylen); - - memcpy(&priv->stations[sta_id].sta.key.key[3], - keyconf->key, keyconf->keylen); - - if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) - == STA_KEY_FLG_NO_ENC) - priv->stations[sta_id].sta.key.key_offset = - iwl_get_free_ucode_key_index(priv); - /* else, we are overriding an existing key => no need to allocated room - * in uCode. */ +/* + * dynamic (per-station) keys + * + * The dynamic keys are a little more complicated. The device has + * a key cache of up to STA_KEY_MAX_NUM/STA_KEY_MAX_NUM_PAN keys. + * These are linked to stations by a table that contains an index + * into the key table for each station/key index/{mcast,unicast}, + * i.e. it's basically an array of pointers like this: + * key_offset_t key_mapping[NUM_STATIONS][4][2]; + * (it really works differently, but you can think of it as such) + * + * The key uploading and linking happens in the same command, the + * add station command with STA_MODIFY_KEY_MASK. + */ - WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, - "no space for a new key"); +static u8 iwlagn_key_sta_id(struct iwl_priv *priv, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + u8 sta_id = IWL_INVALID_STATION; - priv->stations[sta_id].sta.key.key_flags = key_flags; - priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; - priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + if (sta) + sta_id = iwl_sta_id(sta); - memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); - spin_unlock_irqrestore(&priv->sta_lock, flags); + /* + * The device expects GTKs for station interfaces to be + * installed as GTKs for the AP station. 
If we have no + * station ID, then use the ap_sta_id in that case. + */ + if (!sta && vif && vif_priv->ctx) { + switch (vif->type) { + case NL80211_IFTYPE_STATION: + sta_id = vif_priv->ctx->ap_sta_id; + break; + default: + /* + * In all other cases, the key will be + * used either for TX only or is bound + * to a station already. + */ + break; + } + } - return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); + return sta_id; } -static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct ieee80211_key_conf *keyconf, - u8 sta_id) +static int iwlagn_send_sta_key(struct iwl_priv *priv, + struct ieee80211_key_conf *keyconf, + u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k, + u32 cmd_flags) { unsigned long flags; - __le16 key_flags = 0; + __le16 key_flags; struct iwl_addsta_cmd sta_cmd; - - lockdep_assert_held(&priv->mutex); - - key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK); - key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); - key_flags &= ~STA_KEY_FLG_INVALID; - - if (sta_id == ctx->bcast_sta_id) - key_flags |= STA_KEY_MULTICAST_MSK; - - keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + int i; spin_lock_irqsave(&priv->sta_lock, flags); - priv->stations[sta_id].keyinfo.cipher = keyconf->cipher; - priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; - - memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, - keyconf->keylen); - - memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, - keyconf->keylen); - - if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) - == STA_KEY_FLG_NO_ENC) - priv->stations[sta_id].sta.key.key_offset = - iwl_get_free_ucode_key_index(priv); - /* else, we are overriding an existing key => no need to allocated room - * in uCode. */ - - WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, - "no space for a new key"); - - priv->stations[sta_id].sta.key.key_flags = key_flags; - priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; - priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - - memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); + memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd)); spin_unlock_irqrestore(&priv->sta_lock, flags); - return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); -} + key_flags = cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); + key_flags |= STA_KEY_FLG_MAP_KEY_MSK; -static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct ieee80211_key_conf *keyconf, - u8 sta_id) -{ - unsigned long flags; - int ret = 0; - __le16 key_flags = 0; - - key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK); - key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); - key_flags &= ~STA_KEY_FLG_INVALID; + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + key_flags |= STA_KEY_FLG_CCMP; + memcpy(sta_cmd.key.key, keyconf->key, keyconf->keylen); + break; + case WLAN_CIPHER_SUITE_TKIP: + key_flags |= STA_KEY_FLG_TKIP; + sta_cmd.key.tkip_rx_tsc_byte2 = tkip_iv32; + for (i = 0; i < 5; i++) + sta_cmd.key.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]); + memcpy(sta_cmd.key.key, keyconf->key, keyconf->keylen); + break; + case WLAN_CIPHER_SUITE_WEP104: + key_flags |= STA_KEY_FLG_KEY_SIZE_MSK; + /* fall through */ + case WLAN_CIPHER_SUITE_WEP40: + key_flags |= STA_KEY_FLG_WEP; + memcpy(&sta_cmd.key.key[3], keyconf->key, keyconf->keylen); + break; + default: + WARN_ON(1); + return -EINVAL; + } - if (sta_id == ctx->bcast_sta_id) + if 
(!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) key_flags |= STA_KEY_MULTICAST_MSK; - keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; - keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; - - spin_lock_irqsave(&priv->sta_lock, flags); - - priv->stations[sta_id].keyinfo.cipher = keyconf->cipher; - priv->stations[sta_id].keyinfo.keylen = 16; - - if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) - == STA_KEY_FLG_NO_ENC) - priv->stations[sta_id].sta.key.key_offset = - iwl_get_free_ucode_key_index(priv); - /* else, we are overriding an existing key => no need to allocated room - * in uCode. */ - - WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, - "no space for a new key"); - - priv->stations[sta_id].sta.key.key_flags = key_flags; - + /* key pointer (offset) */ + sta_cmd.key.key_offset = keyconf->hw_key_idx; - /* This copy is acutally not needed: we get the key with each TX */ - memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16); + sta_cmd.key.key_flags = key_flags; + sta_cmd.mode = STA_CONTROL_MODIFY_MSK; + sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK; - memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16); - - spin_unlock_irqrestore(&priv->sta_lock, flags); - - return ret; + return iwl_send_add_sta(priv, &sta_cmd, cmd_flags); } void iwl_update_tkip_key(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, + struct ieee80211_vif *vif, struct ieee80211_key_conf *keyconf, struct ieee80211_sta *sta, u32 iv32, u16 *phase1key) { - u8 sta_id; - unsigned long flags; - int i; + u8 sta_id = iwlagn_key_sta_id(priv, vif, sta); + + if (sta_id == IWL_INVALID_STATION) + return; if (iwl_scan_cancel(priv)) { /* cancel scan failed, just live w/ bad key and rely @@ -408,121 +371,110 @@ void iwl_update_tkip_key(struct iwl_priv *priv, return; } - sta_id = iwl_sta_id_or_broadcast(priv, ctx, sta); - if (sta_id == IWL_INVALID_STATION) - return; - - spin_lock_irqsave(&priv->sta_lock, flags); - - priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32; - - for (i = 0; i < 5; i++) - priv->stations[sta_id].sta.key.tkip_rx_ttak[i] = - cpu_to_le16(phase1key[i]); - - priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; - priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - - iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); - - spin_unlock_irqrestore(&priv->sta_lock, flags); - + iwlagn_send_sta_key(priv, keyconf, sta_id, + iv32, phase1key, CMD_ASYNC); } int iwl_remove_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct ieee80211_key_conf *keyconf, - u8 sta_id) + struct ieee80211_sta *sta) { unsigned long flags; - u16 key_flags; - u8 keyidx; struct iwl_addsta_cmd sta_cmd; + u8 sta_id = iwlagn_key_sta_id(priv, ctx->vif, sta); + + /* if station isn't there, neither is the key */ + if (sta_id == IWL_INVALID_STATION) + return -ENOENT; + + spin_lock_irqsave(&priv->sta_lock, flags); + memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd)); + if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) + sta_id = IWL_INVALID_STATION; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + if (sta_id == IWL_INVALID_STATION) + return 0; lockdep_assert_held(&priv->mutex); ctx->key_mapping_keys--; - spin_lock_irqsave(&priv->sta_lock, flags); - key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags); - keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3; - IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id); - if (keyconf->keyidx != keyidx) { - /* We need to remove 
a key with index different that the one - * in the uCode. This means that the key we need to remove has - * been replaced by another one with different index. - * Don't do anything and return ok - */ - spin_unlock_irqrestore(&priv->sta_lock, flags); - return 0; - } - - if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) { - IWL_WARN(priv, "Removing wrong key %d 0x%x\n", - keyconf->keyidx, key_flags); - spin_unlock_irqrestore(&priv->sta_lock, flags); - return 0; - } - - if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset, - &priv->ucode_key_table)) - IWL_ERR(priv, "index %d not used in uCode key table.\n", - priv->stations[sta_id].sta.key.key_offset); - memset(&priv->stations[sta_id].keyinfo, 0, - sizeof(struct iwl_hw_key)); - memset(&priv->stations[sta_id].sta.key, 0, - sizeof(struct iwl_keyinfo)); - priv->stations[sta_id].sta.key.key_flags = - STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID; - priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET; - priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; - priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + if (!test_and_clear_bit(keyconf->hw_key_idx, &priv->ucode_key_table)) + IWL_ERR(priv, "offset %d not used in uCode key table.\n", + keyconf->hw_key_idx); - if (iwl_is_rfkill(priv)) { - IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled.\n"); - spin_unlock_irqrestore(&priv->sta_lock, flags); - return 0; - } - memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); - spin_unlock_irqrestore(&priv->sta_lock, flags); + sta_cmd.key.key_flags = STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID; + sta_cmd.key.key_offset = WEP_INVALID_OFFSET; + sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK; + sta_cmd.mode = STA_CONTROL_MODIFY_MSK; return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); } -int iwl_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - struct ieee80211_key_conf *keyconf, u8 sta_id) +int iwl_set_dynamic_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta) { + struct ieee80211_key_seq seq; + u16 p1k[5]; int ret; + u8 sta_id = iwlagn_key_sta_id(priv, ctx->vif, sta); + const u8 *addr; + + if (sta_id == IWL_INVALID_STATION) + return -EINVAL; lockdep_assert_held(&priv->mutex); + keyconf->hw_key_idx = iwl_get_free_ucode_key_offset(priv); + if (keyconf->hw_key_idx == WEP_INVALID_OFFSET) + return -ENOSPC; + ctx->key_mapping_keys++; - keyconf->hw_key_idx = HW_KEY_DYNAMIC; switch (keyconf->cipher) { - case WLAN_CIPHER_SUITE_CCMP: - ret = iwl_set_ccmp_dynamic_key_info(priv, ctx, keyconf, sta_id); - break; case WLAN_CIPHER_SUITE_TKIP: - ret = iwl_set_tkip_dynamic_key_info(priv, ctx, keyconf, sta_id); + keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; + keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + + if (sta) + addr = sta->addr; + else /* station mode case only */ + addr = ctx->active.bssid_addr; + + /* pre-fill phase 1 key into device cache */ + ieee80211_get_key_rx_seq(keyconf, 0, &seq); + ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); + ret = iwlagn_send_sta_key(priv, keyconf, sta_id, + seq.tkip.iv32, p1k, CMD_SYNC); break; + case WLAN_CIPHER_SUITE_CCMP: + keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + /* fall through */ case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: - ret = iwl_set_wep_dynamic_key_info(priv, ctx, keyconf, sta_id); + ret = iwlagn_send_sta_key(priv, keyconf, sta_id, + 0, NULL, CMD_SYNC); break; default: - 
IWL_ERR(priv, - "Unknown alg: %s cipher = %x\n", __func__, - keyconf->cipher); + IWL_ERR(priv, "Unknown cipher %x\n", keyconf->cipher); ret = -EINVAL; } - IWL_DEBUG_WEP(priv, "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n", + if (ret) { + ctx->key_mapping_keys--; + clear_bit(keyconf->hw_key_idx, &priv->ucode_key_table); + } + + IWL_DEBUG_WEP(priv, "Set dynamic key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n", keyconf->cipher, keyconf->keylen, keyconf->keyidx, - sta_id, ret); + sta ? sta->addr : NULL, ret); return ret; } @@ -554,7 +506,7 @@ int iwlagn_alloc_bcast_station(struct iwl_priv *priv, priv->stations[sta_id].used |= IWL_STA_BCAST; spin_unlock_irqrestore(&priv->sta_lock, flags); - link_cmd = iwl_sta_alloc_lq(priv, sta_id); + link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id); if (!link_cmd) { IWL_ERR(priv, "Unable to initialize rate scaling for bcast station.\n"); @@ -574,14 +526,14 @@ int iwlagn_alloc_bcast_station(struct iwl_priv *priv, * Only used by iwlagn. Placed here to have all bcast station management * code together. */ -static int iwl_update_bcast_station(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) +int iwl_update_bcast_station(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) { unsigned long flags; struct iwl_link_quality_cmd *link_cmd; u8 sta_id = ctx->bcast_sta_id; - link_cmd = iwl_sta_alloc_lq(priv, sta_id); + link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id); if (!link_cmd) { IWL_ERR(priv, "Unable to initialize rate scaling for bcast station.\n"); return -ENOMEM; diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c index 348f74f1c8e..f501d742984 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c @@ -198,7 +198,7 @@ static void iwl_tt_check_exit_ct_kill(unsigned long data) /* Reschedule the ct_kill timer to occur in * CT_KILL_EXIT_DURATION seconds to ensure we get a * thermal update */ - IWL_DEBUG_POWER(priv, "schedule ct_kill exit timer\n"); + IWL_DEBUG_TEMP(priv, "schedule ct_kill exit timer\n"); mod_timer(&priv->thermal_throttle.ct_kill_exit_tm, jiffies + CT_KILL_EXIT_DURATION * HZ); } @@ -208,15 +208,15 @@ static void iwl_perform_ct_kill_task(struct iwl_priv *priv, bool stop) { if (stop) { - IWL_DEBUG_POWER(priv, "Stop all queues\n"); + IWL_DEBUG_TEMP(priv, "Stop all queues\n"); if (priv->mac80211_registered) ieee80211_stop_queues(priv->hw); - IWL_DEBUG_POWER(priv, + IWL_DEBUG_TEMP(priv, "Schedule 5 seconds CT_KILL Timer\n"); mod_timer(&priv->thermal_throttle.ct_kill_exit_tm, jiffies + CT_KILL_EXIT_DURATION * HZ); } else { - IWL_DEBUG_POWER(priv, "Wake all queues\n"); + IWL_DEBUG_TEMP(priv, "Wake all queues\n"); if (priv->mac80211_registered) ieee80211_wake_queues(priv->hw); } @@ -232,7 +232,7 @@ static void iwl_tt_ready_for_ct_kill(unsigned long data) /* temperature timer expired, ready to go into CT_KILL state */ if (tt->state != IWL_TI_CT_KILL) { - IWL_DEBUG_POWER(priv, "entering CT_KILL state when " + IWL_DEBUG_TEMP(priv, "entering CT_KILL state when " "temperature timer expired\n"); tt->state = IWL_TI_CT_KILL; set_bit(STATUS_CT_KILL, &priv->status); @@ -242,7 +242,7 @@ static void iwl_tt_ready_for_ct_kill(unsigned long data) static void iwl_prepare_ct_kill_task(struct iwl_priv *priv) { - IWL_DEBUG_POWER(priv, "Prepare to enter IWL_TI_CT_KILL\n"); + IWL_DEBUG_TEMP(priv, "Prepare to enter IWL_TI_CT_KILL\n"); /* make request to retrieve statistics information */ iwl_send_statistics_request(priv, CMD_SYNC, false); /* Reschedule the ct_kill wait timer 
*/ @@ -273,7 +273,7 @@ static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force) (temp > tt->tt_previous_temp) && ((temp - tt->tt_previous_temp) > IWL_TT_INCREASE_MARGIN)) { - IWL_DEBUG_POWER(priv, + IWL_DEBUG_TEMP(priv, "Temperature increase %d degree Celsius\n", (temp - tt->tt_previous_temp)); } @@ -338,9 +338,9 @@ static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force) } else if (old_state == IWL_TI_CT_KILL && tt->state != IWL_TI_CT_KILL) iwl_perform_ct_kill_task(priv, false); - IWL_DEBUG_POWER(priv, "Temperature state changed %u\n", + IWL_DEBUG_TEMP(priv, "Temperature state changed %u\n", tt->state); - IWL_DEBUG_POWER(priv, "Power Index change to %u\n", + IWL_DEBUG_TEMP(priv, "Power Index change to %u\n", tt->tt_power_mode); } mutex_unlock(&priv->mutex); @@ -397,7 +397,7 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force) (temp > tt->tt_previous_temp) && ((temp - tt->tt_previous_temp) > IWL_TT_INCREASE_MARGIN)) { - IWL_DEBUG_POWER(priv, + IWL_DEBUG_TEMP(priv, "Temperature increase %d " "degree Celsius\n", (temp - tt->tt_previous_temp)); @@ -467,13 +467,13 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force) set_bit(STATUS_CT_KILL, &priv->status); tt->state = old_state; } else { - IWL_DEBUG_POWER(priv, + IWL_DEBUG_TEMP(priv, "Thermal Throttling to new state: %u\n", tt->state); if (old_state != IWL_TI_CT_KILL && tt->state == IWL_TI_CT_KILL) { if (force) { - IWL_DEBUG_POWER(priv, + IWL_DEBUG_TEMP(priv, "Enter IWL_TI_CT_KILL\n"); set_bit(STATUS_CT_KILL, &priv->status); iwl_perform_ct_kill_task(priv, true); @@ -483,7 +483,7 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force) } } else if (old_state == IWL_TI_CT_KILL && tt->state != IWL_TI_CT_KILL) { - IWL_DEBUG_POWER(priv, "Exit IWL_TI_CT_KILL\n"); + IWL_DEBUG_TEMP(priv, "Exit IWL_TI_CT_KILL\n"); iwl_perform_ct_kill_task(priv, false); } } @@ -568,7 +568,7 @@ void iwl_tt_enter_ct_kill(struct iwl_priv *priv) if (test_bit(STATUS_EXIT_PENDING, &priv->status)) return; - IWL_DEBUG_POWER(priv, "Queueing critical temperature enter.\n"); + IWL_DEBUG_TEMP(priv, "Queueing critical temperature enter.\n"); queue_work(priv->workqueue, &priv->ct_enter); } @@ -577,7 +577,7 @@ void iwl_tt_exit_ct_kill(struct iwl_priv *priv) if (test_bit(STATUS_EXIT_PENDING, &priv->status)) return; - IWL_DEBUG_POWER(priv, "Queueing critical temperature exit.\n"); + IWL_DEBUG_TEMP(priv, "Queueing critical temperature exit.\n"); queue_work(priv->workqueue, &priv->ct_exit); } @@ -603,7 +603,7 @@ void iwl_tt_handler(struct iwl_priv *priv) if (test_bit(STATUS_EXIT_PENDING, &priv->status)) return; - IWL_DEBUG_POWER(priv, "Queueing thermal throttling work.\n"); + IWL_DEBUG_TEMP(priv, "Queueing thermal throttling work.\n"); queue_work(priv->workqueue, &priv->tt_work); } @@ -618,7 +618,7 @@ void iwl_tt_initialize(struct iwl_priv *priv) int size = sizeof(struct iwl_tt_trans) * (IWL_TI_STATE_MAX - 1); struct iwl_tt_trans *transaction; - IWL_DEBUG_POWER(priv, "Initialize Thermal Throttling\n"); + IWL_DEBUG_TEMP(priv, "Initialize Thermal Throttling\n"); memset(tt, 0, sizeof(struct iwl_tt_mgmt)); @@ -638,7 +638,7 @@ void iwl_tt_initialize(struct iwl_priv *priv) INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit); if (priv->cfg->base_params->adv_thermal_throttle) { - IWL_DEBUG_POWER(priv, "Advanced Thermal Throttling\n"); + IWL_DEBUG_TEMP(priv, "Advanced Thermal Throttling\n"); tt->restriction = kzalloc(sizeof(struct iwl_tt_restriction) * IWL_TI_STATE_MAX, 
GFP_KERNEL); tt->transaction = kzalloc(sizeof(struct iwl_tt_trans) * @@ -671,7 +671,7 @@ void iwl_tt_initialize(struct iwl_priv *priv) priv->thermal_throttle.advanced_tt = true; } } else { - IWL_DEBUG_POWER(priv, "Legacy Thermal Throttling\n"); + IWL_DEBUG_TEMP(priv, "Legacy Thermal Throttling\n"); priv->thermal_throttle.advanced_tt = false; } } diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c index 4974cd7837c..53bb59ee719 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c @@ -39,6 +39,7 @@ #include "iwl-helpers.h" #include "iwl-agn-hw.h" #include "iwl-agn.h" +#include "iwl-trans.h" /* * mac80211 queues, ACs, hardware queues, FIFOs. @@ -95,132 +96,8 @@ static inline int get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid) return -EINVAL; } -/** - * iwlagn_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array - */ -static void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv, - struct iwl_tx_queue *txq, - u16 byte_cnt) -{ - struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr; - int write_ptr = txq->q.write_ptr; - int txq_id = txq->q.id; - u8 sec_ctl = 0; - u8 sta_id = 0; - u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; - __le16 bc_ent; - - WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX); - - sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id; - sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl; - - switch (sec_ctl & TX_CMD_SEC_MSK) { - case TX_CMD_SEC_CCM: - len += CCMP_MIC_LEN; - break; - case TX_CMD_SEC_TKIP: - len += TKIP_ICV_LEN; - break; - case TX_CMD_SEC_WEP: - len += WEP_IV_LEN + WEP_ICV_LEN; - break; - } - - bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12)); - - scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; - - if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) - scd_bc_tbl[txq_id]. - tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; -} - -static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv, - struct iwl_tx_queue *txq) -{ - struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr; - int txq_id = txq->q.id; - int read_ptr = txq->q.read_ptr; - u8 sta_id = 0; - __le16 bc_ent; - - WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); - - if (txq_id != priv->cmd_queue) - sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id; - - bc_ent = cpu_to_le16(1 | (sta_id << 12)); - scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; - - if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) - scd_bc_tbl[txq_id]. - tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; -} - -static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid, - u16 txq_id) -{ - u32 tbl_dw_addr; - u32 tbl_dw; - u16 scd_q2ratid; - - scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK; - - tbl_dw_addr = priv->scd_base_addr + - IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id); - - tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr); - - if (txq_id & 0x1) - tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); - else - tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); - - iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw); - - return 0; -} - -static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id) -{ - /* Simply stop the queue, but don't change any configuration; - * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. 
*/ - iwl_write_prph(priv, - IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id), - (0 << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE)| - (1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); -} - -void iwlagn_set_wr_ptrs(struct iwl_priv *priv, - int txq_id, u32 index) -{ - iwl_write_direct32(priv, HBUS_TARG_WRPTR, - (index & 0xff) | (txq_id << 8)); - iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(txq_id), index); -} - -void iwlagn_tx_queue_set_status(struct iwl_priv *priv, - struct iwl_tx_queue *txq, - int tx_fifo_id, int scd_retry) -{ - int txq_id = txq->q.id; - int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0; - - iwl_write_prph(priv, IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id), - (active << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE) | - (tx_fifo_id << IWLAGN_SCD_QUEUE_STTS_REG_POS_TXF) | - (1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_WSL) | - IWLAGN_SCD_QUEUE_STTS_REG_MSK); - - txq->sched_retry = scd_retry; - - IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n", - active ? "Activate" : "Deactivate", - scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id); -} - -static int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, int sta_id, int tid) +static int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, int sta_id, + int tid) { if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) || (IWLAGN_FIRST_AMPDU_QUEUE + @@ -237,106 +114,14 @@ static int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, int sta_id, return iwl_sta_tx_modify_enable_tid(priv, sta_id, tid); } -void iwlagn_txq_agg_queue_setup(struct iwl_priv *priv, - struct ieee80211_sta *sta, - int tid, int frame_limit) +static void iwlagn_tx_cmd_protection(struct iwl_priv *priv, + struct ieee80211_tx_info *info, + __le16 fc, __le32 *tx_flags) { - int sta_id, tx_fifo, txq_id, ssn_idx; - u16 ra_tid; - unsigned long flags; - struct iwl_tid_data *tid_data; - - sta_id = iwl_sta_id(sta); - if (WARN_ON(sta_id == IWL_INVALID_STATION)) - return; - if (WARN_ON(tid >= MAX_TID_COUNT)) - return; - - spin_lock_irqsave(&priv->sta_lock, flags); - tid_data = &priv->stations[sta_id].tid[tid]; - ssn_idx = SEQ_TO_SN(tid_data->seq_number); - txq_id = tid_data->agg.txq_id; - tx_fifo = tid_data->agg.tx_fifo; - spin_unlock_irqrestore(&priv->sta_lock, flags); - - ra_tid = BUILD_RAxTID(sta_id, tid); - - spin_lock_irqsave(&priv->lock, flags); - - /* Stop this Tx queue before configuring it */ - iwlagn_tx_queue_stop_scheduler(priv, txq_id); - - /* Map receiver-address / traffic-ID to this queue */ - iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id); - - /* Set this queue as a chain-building queue */ - iwl_set_bits_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL, (1<<txq_id)); - - /* enable aggregations for the queue */ - iwl_set_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1<<txq_id)); - - /* Place first TFD at index corresponding to start sequence number. 
- * Assumes that ssn_idx is valid (!= 0xFFF) */ - priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); - priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); - iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx); - - /* Set up Tx window size and frame limit for this queue */ - iwl_write_targ_mem(priv, priv->scd_base_addr + - IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + - sizeof(u32), - ((frame_limit << - IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & - IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | - ((frame_limit << - IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & - IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); - - iwl_set_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id)); - - /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ - iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1); - - spin_unlock_irqrestore(&priv->lock, flags); -} - -static int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id, - u16 ssn_idx, u8 tx_fifo) -{ - if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) || - (IWLAGN_FIRST_AMPDU_QUEUE + - priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) { - IWL_ERR(priv, - "queue number out of range: %d, must be %d to %d\n", - txq_id, IWLAGN_FIRST_AMPDU_QUEUE, - IWLAGN_FIRST_AMPDU_QUEUE + - priv->cfg->base_params->num_of_ampdu_queues - 1); - return -EINVAL; - } - - iwlagn_tx_queue_stop_scheduler(priv, txq_id); - - iwl_clear_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1 << txq_id)); - - priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); - priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); - /* supposes that ssn_idx is valid (!= 0xFFF) */ - iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx); - - iwl_clear_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id)); - iwl_txq_ctx_deactivate(priv, txq_id); - iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0); - - return 0; -} - -/* - * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask - * must be called under priv->lock and mac access - */ -void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask) -{ - iwl_write_prph(priv, IWLAGN_SCD_TXFACT, mask); + if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS || + info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT || + info->flags & IEEE80211_TX_CTL_AMPDU) + *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK; } /* @@ -353,19 +138,15 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv, __le32 tx_flags = tx_cmd->tx_flags; tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; - if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { + + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) tx_flags |= TX_CMD_FLG_ACK_MSK; - if (ieee80211_is_mgmt(fc)) - tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; - if (ieee80211_is_probe_resp(fc) && - !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) - tx_flags |= TX_CMD_FLG_TSF_MSK; - } else { - tx_flags &= (~TX_CMD_FLG_ACK_MSK); - tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; - } + else + tx_flags &= ~TX_CMD_FLG_ACK_MSK; - if (ieee80211_is_back_req(fc)) + if (ieee80211_is_probe_resp(fc)) + tx_flags |= TX_CMD_FLG_TSF_MSK; + else if (ieee80211_is_back_req(fc)) tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; else if (info->band == IEEE80211_BAND_2GHZ && priv->cfg->bt_params && @@ -388,7 +169,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv, tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; } - priv->cfg->ops->utils->tx_cmd_protection(priv, info, fc, &tx_flags); + iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags); tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); if (ieee80211_is_mgmt(fc)) { @@ -436,6 +217,18 @@ static void iwlagn_tx_cmd_build_rate(struct 
iwl_priv *priv, if (ieee80211_is_data(fc)) { tx_cmd->initial_rate_index = 0; tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; +#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL + if (priv->tm_fixed_rate) { + /* + * rate overwrite by testmode + * we not only send lq command to change rate + * we also re-enforce per data pkt base. + */ + tx_cmd->tx_flags &= ~TX_CMD_FLG_STA_RATE_MSK; + memcpy(&tx_cmd->rate_n_flags, &priv->tm_fixed_rate, + sizeof(tx_cmd->rate_n_flags)); + } +#endif return; } @@ -497,8 +290,7 @@ static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv, case WLAN_CIPHER_SUITE_TKIP: tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; - ieee80211_get_tkip_key(keyconf, skb_frag, - IEEE80211_TKIP_P2_KEY, tx_cmd->key); + ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key); IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n"); break; @@ -528,26 +320,17 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_sta *sta = info->control.sta; struct iwl_station_priv *sta_priv = NULL; - struct iwl_tx_queue *txq; - struct iwl_queue *q; - struct iwl_device_cmd *out_cmd; - struct iwl_cmd_meta *out_meta; - struct iwl_tx_cmd *tx_cmd; struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct iwl_tx_cmd *tx_cmd; int txq_id; - dma_addr_t phys_addr = 0; - dma_addr_t txcmd_phys; - dma_addr_t scratch_phys; - u16 len, firstlen, secondlen; + u16 seq_number = 0; __le16 fc; u8 hdr_len; + u16 len; u8 sta_id; - u8 wait_write_ptr = 0; u8 tid = 0; - u8 *qc = NULL; unsigned long flags; bool is_agg = false; @@ -595,8 +378,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); - if (sta) - sta_priv = (void *)sta->drv_priv; + if (info->control.sta) + sta_priv = (void *)info->control.sta->drv_priv; if (sta_priv && sta_priv->asleep && (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)) { @@ -631,6 +414,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) spin_lock(&priv->sta_lock); if (ieee80211_is_data_qos(fc)) { + u8 *qc = NULL; qc = ieee80211_get_qos_ctl(hdr); tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; @@ -651,38 +435,13 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) } } - txq = &priv->txq[txq_id]; - q = &txq->q; - - if (unlikely(iwl_queue_space(q) < q->high_mark)) + tx_cmd = trans_get_tx_cmd(&priv->trans, txq_id); + if (unlikely(!tx_cmd)) goto drop_unlock_sta; - /* Set up driver data for this TFD */ - memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); - txq->txb[q->write_ptr].skb = skb; - txq->txb[q->write_ptr].ctx = ctx; - - /* Set up first empty entry in queue's array of Tx/cmd buffers */ - out_cmd = txq->cmd[q->write_ptr]; - out_meta = &txq->meta[q->write_ptr]; - tx_cmd = &out_cmd->cmd.tx; - memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); - memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd)); - - /* - * Set up the Tx-command (not MAC!) header. - * Store the chosen Tx queue and TFD index within the sequence field; - * after Tx, uCode's Tx response will return this value so driver can - * locate the frame within the tx queue and do post-tx processing. 
- */ - out_cmd->hdr.cmd = REPLY_TX; - out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | - INDEX_TO_SEQ(q->write_ptr))); - /* Copy MAC header from skb into command buffer */ memcpy(tx_cmd->hdr, hdr, hdr_len); - /* Total # bytes to be transmitted */ len = (u16)skb->len; tx_cmd->len = cpu_to_le16(len); @@ -697,54 +456,9 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc); iwl_update_stats(priv, true, fc, len); - /* - * Use the first empty entry in this queue's command buffer array - * to contain the Tx command and MAC header concatenated together - * (payload data will be in another buffer). - * Size of this varies, due to varying MAC header length. - * If end is not dword aligned, we'll have 2 extra bytes at the end - * of the MAC header (device reads on dword boundaries). - * We'll tell device about this padding later. - */ - len = sizeof(struct iwl_tx_cmd) + - sizeof(struct iwl_cmd_header) + hdr_len; - firstlen = (len + 3) & ~3; - - /* Tell NIC about any 2-byte padding after MAC header */ - if (firstlen != len) - tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; - - /* Physical address of this Tx command's header (not MAC header!), - * within command buffer array. */ - txcmd_phys = pci_map_single(priv->pci_dev, - &out_cmd->hdr, firstlen, - PCI_DMA_BIDIRECTIONAL); - if (unlikely(pci_dma_mapping_error(priv->pci_dev, txcmd_phys))) - goto drop_unlock_sta; - dma_unmap_addr_set(out_meta, mapping, txcmd_phys); - dma_unmap_len_set(out_meta, len, firstlen); - if (!ieee80211_has_morefrags(hdr->frame_control)) { - txq->need_update = 1; - } else { - wait_write_ptr = 1; - txq->need_update = 0; - } - - /* Set up TFD's 2nd entry to point directly to remainder of skb, - * if any (802.11 null frames have no payload). 
*/ - secondlen = skb->len - hdr_len; - if (secondlen > 0) { - phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, - secondlen, PCI_DMA_TODEVICE); - if (unlikely(pci_dma_mapping_error(priv->pci_dev, phys_addr))) { - pci_unmap_single(priv->pci_dev, - dma_unmap_addr(out_meta, mapping), - dma_unmap_len(out_meta, len), - PCI_DMA_BIDIRECTIONAL); - goto drop_unlock_sta; - } - } + if (trans_tx(&priv->trans, skb, tx_cmd, txq_id, fc, is_agg, ctx)) + goto drop_unlock_sta; if (ieee80211_is_data_qos(fc)) { priv->stations[sta_id].tid[tid].tfds_in_queue++; @@ -753,55 +467,9 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) } spin_unlock(&priv->sta_lock); - - /* Attach buffers to TFD */ - iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1); - if (secondlen > 0) - iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, - secondlen, 0); - - scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + - offsetof(struct iwl_tx_cmd, scratch); - - /* take back ownership of DMA buffer to enable update */ - pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys, - firstlen, PCI_DMA_BIDIRECTIONAL); - tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); - tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); - - IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n", - le16_to_cpu(out_cmd->hdr.sequence)); - IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); - iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd)); - iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len); - - /* Set up entry for this TFD in Tx byte-count array */ - if (info->flags & IEEE80211_TX_CTL_AMPDU) - iwlagn_txq_update_byte_cnt_tbl(priv, txq, - le16_to_cpu(tx_cmd->len)); - - pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys, - firstlen, PCI_DMA_BIDIRECTIONAL); - - trace_iwlwifi_dev_tx(priv, - &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr], - sizeof(struct iwl_tfd), - &out_cmd->hdr, firstlen, - skb->data + hdr_len, secondlen); - - /* Tell device the write index *just past* this latest filled TFD */ - q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); - iwl_txq_update_write_ptr(priv, txq); spin_unlock_irqrestore(&priv->lock, flags); /* - * At this point the frame is "transmitted" successfully - * and we will get a TX status notification eventually, - * regardless of the value of ret. "ret" only indicates - * whether or not we should update the write pointer. - */ - - /* * Avoid atomic ops if it isn't an associated client. 
* Also, if this is a packet for aggregation, don't * increase the counter because the ucode will stop @@ -811,17 +479,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) if (sta_priv && sta_priv->client && !is_agg) atomic_inc(&sta_priv->pending_frames); - if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) { - if (wait_write_ptr) { - spin_lock_irqsave(&priv->lock, flags); - txq->need_update = 1; - iwl_txq_update_write_ptr(priv, txq); - spin_unlock_irqrestore(&priv->lock, flags); - } else { - iwl_stop_queue(priv, txq); - } - } - return 0; drop_unlock_sta: @@ -831,178 +488,6 @@ drop_unlock_priv: return -1; } -static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv, - struct iwl_dma_ptr *ptr, size_t size) -{ - ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma, - GFP_KERNEL); - if (!ptr->addr) - return -ENOMEM; - ptr->size = size; - return 0; -} - -static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv, - struct iwl_dma_ptr *ptr) -{ - if (unlikely(!ptr->addr)) - return; - - dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma); - memset(ptr, 0, sizeof(*ptr)); -} - -/** - * iwlagn_hw_txq_ctx_free - Free TXQ Context - * - * Destroy all TX DMA queues and structures - */ -void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv) -{ - int txq_id; - - /* Tx queues */ - if (priv->txq) { - for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) - if (txq_id == priv->cmd_queue) - iwl_cmd_queue_free(priv); - else - iwl_tx_queue_free(priv, txq_id); - } - iwlagn_free_dma_ptr(priv, &priv->kw); - - iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls); - - /* free tx queue structure */ - iwl_free_txq_mem(priv); -} - -/** - * iwlagn_txq_ctx_alloc - allocate TX queue context - * Allocate all Tx DMA structures and initialize them - * - * @param priv - * @return error code - */ -int iwlagn_txq_ctx_alloc(struct iwl_priv *priv) -{ - int ret; - int txq_id, slots_num; - unsigned long flags; - - /* Free all tx/cmd queues and keep-warm buffer */ - iwlagn_hw_txq_ctx_free(priv); - - ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls, - priv->hw_params.scd_bc_tbls_size); - if (ret) { - IWL_ERR(priv, "Scheduler BC Table allocation failed\n"); - goto error_bc_tbls; - } - /* Alloc keep-warm buffer */ - ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE); - if (ret) { - IWL_ERR(priv, "Keep Warm allocation failed\n"); - goto error_kw; - } - - /* allocate tx queue structure */ - ret = iwl_alloc_txq_mem(priv); - if (ret) - goto error; - - spin_lock_irqsave(&priv->lock, flags); - - /* Turn off all Tx DMA fifos */ - iwlagn_txq_set_sched(priv, 0); - - /* Tell NIC where to find the "keep warm" buffer */ - iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); - - spin_unlock_irqrestore(&priv->lock, flags); - - /* Alloc and init all Tx queues, including the command queue (#4/#9) */ - for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { - slots_num = (txq_id == priv->cmd_queue) ? 
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; - ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num, - txq_id); - if (ret) { - IWL_ERR(priv, "Tx %d queue init failed\n", txq_id); - goto error; - } - } - - return ret; - - error: - iwlagn_hw_txq_ctx_free(priv); - iwlagn_free_dma_ptr(priv, &priv->kw); - error_kw: - iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls); - error_bc_tbls: - return ret; -} - -void iwlagn_txq_ctx_reset(struct iwl_priv *priv) -{ - int txq_id, slots_num; - unsigned long flags; - - spin_lock_irqsave(&priv->lock, flags); - - /* Turn off all Tx DMA fifos */ - iwlagn_txq_set_sched(priv, 0); - - /* Tell NIC where to find the "keep warm" buffer */ - iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); - - spin_unlock_irqrestore(&priv->lock, flags); - - /* Alloc and init all Tx queues, including the command queue (#4) */ - for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { - slots_num = txq_id == priv->cmd_queue ? - TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; - iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id); - } -} - -/** - * iwlagn_txq_ctx_stop - Stop all Tx DMA channels - */ -void iwlagn_txq_ctx_stop(struct iwl_priv *priv) -{ - int ch, txq_id; - unsigned long flags; - - /* Turn off all Tx DMA fifos */ - spin_lock_irqsave(&priv->lock, flags); - - iwlagn_txq_set_sched(priv, 0); - - /* Stop each Tx DMA channel, and wait for it to be idle */ - for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) { - iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); - if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG, - FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), - 1000)) - IWL_ERR(priv, "Failing on timeout while stopping" - " DMA channel %d [0x%08x]", ch, - iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG)); - } - spin_unlock_irqrestore(&priv->lock, flags); - - if (!priv->txq) - return; - - /* Unmap DMA from host system and free skb's */ - for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) - if (txq_id == priv->cmd_queue) - iwl_cmd_queue_unmap(priv); - else - iwl_tx_queue_unmap(priv, txq_id); -} - /* * Find first available (lowest unused) Tx Queue, mark it "active". * Called only when finding queue for aggregation. @@ -1033,8 +518,8 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, if (unlikely(tx_fifo < 0)) return tx_fifo; - IWL_WARN(priv, "%s on ra = %pM tid = %d\n", - __func__, sta->addr, tid); + IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n", + sta->addr, tid); sta_id = iwl_sta_id(sta); if (sta_id == IWL_INVALID_STATION) { @@ -1150,7 +635,7 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, * to deactivate the uCode queue, just return "success" to allow * mac80211 to clean up it own data. 
*/ - iwlagn_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id); + trans_txq_agg_disable(&priv->trans, txq_id, ssn, tx_fifo_id); spin_unlock_irqrestore(&priv->lock, flags); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); @@ -1179,7 +664,8 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv, u16 ssn = SEQ_TO_SN(tid_data->seq_number); int tx_fifo = get_fifo_from_tid(ctx, tid); IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n"); - iwlagn_txq_agg_disable(priv, txq_id, ssn, tx_fifo); + trans_txq_agg_disable(&priv->trans, txq_id, + ssn, tx_fifo); tid_data->agg.state = IWL_AGG_OFF; ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid); } @@ -1236,9 +722,9 @@ int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) struct ieee80211_hdr *hdr; if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) { - IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, " - "is out of range [0-%d] %d %d.\n", txq_id, - index, q->n_bd, q->write_ptr, q->read_ptr); + IWL_ERR(priv, "%s: Read index for DMA queue txq id (%d), " + "index %d is out of range [0-%d] %d %d.\n", __func__, + txq_id, index, q->n_bd, q->write_ptr, q->read_ptr); return 0; } @@ -1261,7 +747,7 @@ int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) iwlagn_txq_inval_byte_cnt_tbl(priv, txq); - iwlagn_txq_free_tfd(priv, txq); + iwlagn_txq_free_tfd(priv, txq, txq->q.read_ptr); } return nfreed; } diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c index 97de5d9de67..a895a099d08 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c @@ -39,38 +39,7 @@ #include "iwl-agn-hw.h" #include "iwl-agn.h" #include "iwl-agn-calib.h" - -#define IWL_AC_UNSET -1 - -struct queue_to_fifo_ac { - s8 fifo, ac; -}; - -static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = { - { IWL_TX_FIFO_VO, IEEE80211_AC_VO, }, - { IWL_TX_FIFO_VI, IEEE80211_AC_VI, }, - { IWL_TX_FIFO_BE, IEEE80211_AC_BE, }, - { IWL_TX_FIFO_BK, IEEE80211_AC_BK, }, - { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, }, - { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, }, - { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, }, - { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, }, - { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, }, - { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, }, -}; - -static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = { - { IWL_TX_FIFO_VO, IEEE80211_AC_VO, }, - { IWL_TX_FIFO_VI, IEEE80211_AC_VI, }, - { IWL_TX_FIFO_BE, IEEE80211_AC_BE, }, - { IWL_TX_FIFO_BK, IEEE80211_AC_BK, }, - { IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, }, - { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, }, - { IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, }, - { IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, }, - { IWL_TX_FIFO_BE_IPAN, 2, }, - { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, }, -}; +#include "iwl-trans.h" static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = { {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP, @@ -143,7 +112,7 @@ static int iwlagn_load_section(struct iwl_priv *priv, const char *name, FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); - IWL_DEBUG_INFO(priv, "%s uCode section being loaded...\n", name); + IWL_DEBUG_FW(priv, "%s uCode section being loaded...\n", name); ret = wait_event_interruptible_timeout(priv->wait_command_queue, priv->ucode_write_complete, 5 * HZ); if (ret == -ERESTARTSYS) { @@ -183,10 +152,7 @@ static int iwlagn_set_Xtal_calib(struct iwl_priv *priv) __le16 *xtal_calib = (__le16 *)iwl_eeprom_query_addr(priv, 
EEPROM_XTAL); - cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD; - cmd.hdr.first_group = 0; - cmd.hdr.groups_num = 1; - cmd.hdr.data_valid = 1; + iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD); cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]); cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]); return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL], @@ -197,17 +163,16 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv) { struct iwl_calib_temperature_offset_cmd cmd; __le16 *offset_calib = - (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_TEMPERATURE); - cmd.hdr.op_code = IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD; - cmd.hdr.first_group = 0; - cmd.hdr.groups_num = 1; - cmd.hdr.data_valid = 1; - cmd.radio_sensor_offset = le16_to_cpu(offset_calib[1]); + (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_TEMPERATURE); + + memset(&cmd, 0, sizeof(cmd)); + iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); + memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(offset_calib)); if (!(cmd.radio_sensor_offset)) cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET; - cmd.reserved = 0; + IWL_DEBUG_CALIB(priv, "Radio sensor offset: %d\n", - cmd.radio_sensor_offset); + le16_to_cpu(cmd.radio_sensor_offset)); return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET], (u8 *)&cmd, sizeof(cmd)); } @@ -225,9 +190,10 @@ static int iwlagn_send_calib_cfg(struct iwl_priv *priv) calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL; calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL; calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL; - calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL; + calib_cfg_cmd.ucd_calib_cfg.flags = + IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK; - return iwl_send_cmd(priv, &cmd); + return trans_send_cmd(&priv->trans, &cmd); } void iwlagn_rx_calib_result(struct iwl_priv *priv, @@ -325,7 +291,8 @@ static int iwlagn_send_wimax_coex(struct iwl_priv *priv) /* coexistence is disabled */ memset(&coex_cmd, 0, sizeof(coex_cmd)); } - return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD, + return trans_send_cmd_pdu(&priv->trans, + COEX_PRIORITY_TABLE_CMD, CMD_SYNC, sizeof(coex_cmd), &coex_cmd); } @@ -357,7 +324,8 @@ void iwlagn_send_prio_tbl(struct iwl_priv *priv) memcpy(prio_tbl_cmd.prio_tbl, iwlagn_bt_prio_tbl, sizeof(iwlagn_bt_prio_tbl)); - if (iwl_send_cmd_pdu(priv, REPLY_BT_COEX_PRIO_TABLE, + if (trans_send_cmd_pdu(&priv->trans, + REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC, sizeof(prio_tbl_cmd), &prio_tbl_cmd)) IWL_ERR(priv, "failed to send BT prio tbl command\n"); } @@ -369,7 +337,8 @@ int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type) env_cmd.action = action; env_cmd.type = type; - ret = iwl_send_cmd_pdu(priv, REPLY_BT_COEX_PROT_ENV, + ret = trans_send_cmd_pdu(&priv->trans, + REPLY_BT_COEX_PROT_ENV, CMD_SYNC, sizeof(env_cmd), &env_cmd); if (ret) IWL_ERR(priv, "failed to send BT env command\n"); @@ -379,109 +348,9 @@ int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type) static int iwlagn_alive_notify(struct iwl_priv *priv) { - const struct queue_to_fifo_ac *queue_to_fifo; - struct iwl_rxon_context *ctx; - u32 a; - unsigned long flags; - int i, chan; - u32 reg_val; int ret; - spin_lock_irqsave(&priv->lock, flags); - - priv->scd_base_addr = iwl_read_prph(priv, IWLAGN_SCD_SRAM_BASE_ADDR); - a = priv->scd_base_addr + IWLAGN_SCD_CONTEXT_DATA_OFFSET; - for (; a < priv->scd_base_addr + IWLAGN_SCD_TX_STTS_BITMAP_OFFSET; - a += 4) - iwl_write_targ_mem(priv, a, 0); - for (; a < priv->scd_base_addr + 
IWLAGN_SCD_TRANSLATE_TBL_OFFSET; - a += 4) - iwl_write_targ_mem(priv, a, 0); - for (; a < priv->scd_base_addr + - IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4) - iwl_write_targ_mem(priv, a, 0); - - iwl_write_prph(priv, IWLAGN_SCD_DRAM_BASE_ADDR, - priv->scd_bc_tbls.dma >> 10); - - /* Enable DMA channel */ - for (chan = 0; chan < FH50_TCSR_CHNL_NUM ; chan++) - iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan), - FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | - FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); - - /* Update FH chicken bits */ - reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG); - iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG, - reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); - - iwl_write_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL, - IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv)); - iwl_write_prph(priv, IWLAGN_SCD_AGGR_SEL, 0); - - /* initiate the queues */ - for (i = 0; i < priv->hw_params.max_txq_num; i++) { - iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(i), 0); - iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8)); - iwl_write_targ_mem(priv, priv->scd_base_addr + - IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i), 0); - iwl_write_targ_mem(priv, priv->scd_base_addr + - IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i) + - sizeof(u32), - ((SCD_WIN_SIZE << - IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & - IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | - ((SCD_FRAME_LIMIT << - IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & - IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); - } - - iwl_write_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, - IWL_MASK(0, priv->hw_params.max_txq_num)); - - /* Activate all Tx DMA/FIFO channels */ - iwlagn_txq_set_sched(priv, IWL_MASK(0, 7)); - - /* map queues to FIFOs */ - if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)) - queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo; - else - queue_to_fifo = iwlagn_default_queue_to_tx_fifo; - - iwlagn_set_wr_ptrs(priv, priv->cmd_queue, 0); - - /* make sure all queue are not stopped */ - memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped)); - for (i = 0; i < 4; i++) - atomic_set(&priv->queue_stop_count[i], 0); - for_each_context(priv, ctx) - ctx->last_tx_rejected = false; - - /* reset to 0 to enable all the queue first */ - priv->txq_ctx_active_msk = 0; - - BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10); - BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 10); - - for (i = 0; i < 10; i++) { - int fifo = queue_to_fifo[i].fifo; - int ac = queue_to_fifo[i].ac; - - iwl_txq_ctx_activate(priv, i); - - if (fifo == IWL_TX_FIFO_UNUSED) - continue; - - if (ac != IWL_AC_UNSET) - iwl_set_swq_id(&priv->txq[i], ac, i); - iwlagn_tx_queue_set_status(priv, &priv->txq[i], fifo, 0); - } - - spin_unlock_irqrestore(&priv->lock, flags); - - /* Enable L1-Active */ - iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG, - APMG_PCIDEV_STT_VAL_L1_ACT_DIS); + trans_tx_start(&priv->trans); ret = iwlagn_send_wimax_coex(priv); if (ret) @@ -508,7 +377,7 @@ static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, u32 val; u32 i; - IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); + IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len); for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) { /* read data comes through single port, auto-incr addr */ @@ -533,7 +402,7 @@ static void iwl_print_mismatch_inst(struct iwl_priv *priv, u32 offs; int errors = 0; - IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); + IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len); iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, 
IWLAGN_RTC_INST_LOWER_BOUND); @@ -559,7 +428,7 @@ static void iwl_print_mismatch_inst(struct iwl_priv *priv, static int iwl_verify_ucode(struct iwl_priv *priv, struct fw_img *img) { if (!iwlcore_verify_inst_sparse(priv, &img->code)) { - IWL_DEBUG_INFO(priv, "uCode is good in inst SRAM\n"); + IWL_DEBUG_FW(priv, "uCode is good in inst SRAM\n"); return 0; } @@ -583,7 +452,7 @@ static void iwlagn_alive_fn(struct iwl_priv *priv, palive = &pkt->u.alive_frame; - IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision " + IWL_DEBUG_FW(priv, "Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n", palive->is_valid, palive->ver_type, palive->ver_subtype); @@ -602,14 +471,14 @@ static void iwlagn_alive_fn(struct iwl_priv *priv, int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv, struct fw_img *image, - int subtype, int alternate_subtype) + enum iwlagn_ucode_type ucode_type) { struct iwl_notification_wait alive_wait; struct iwlagn_alive_data alive_data; int ret; - enum iwlagn_ucode_subtype old_type; + enum iwlagn_ucode_type old_type; - ret = iwlagn_start_device(priv); + ret = trans_start_device(&priv->trans); if (ret) return ret; @@ -617,7 +486,7 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv, iwlagn_alive_fn, &alive_data); old_type = priv->ucode_type; - priv->ucode_type = subtype; + priv->ucode_type = ucode_type; ret = iwlagn_load_given_ucode(priv, image); if (ret) { @@ -626,8 +495,7 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv, return ret; } - /* Remove all resets to allow NIC to operate */ - iwl_write32(priv, CSR_RESET, 0); + trans_kick_nic(&priv->trans); /* * Some things may run in the background now, but we @@ -645,24 +513,22 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv, return -EIO; } - if (alive_data.subtype != subtype && - alive_data.subtype != alternate_subtype) { - IWL_ERR(priv, - "Loaded ucode is not expected type (got %d, expected %d)!\n", - alive_data.subtype, subtype); - priv->ucode_type = old_type; - return -EIO; - } + /* + * This step takes a long time (60-80ms!!) and + * WoWLAN image should be loaded quickly, so + * skip it for WoWLAN. + */ + if (ucode_type != IWL_UCODE_WOWLAN) { + ret = iwl_verify_ucode(priv, image); + if (ret) { + priv->ucode_type = old_type; + return ret; + } - ret = iwl_verify_ucode(priv, image); - if (ret) { - priv->ucode_type = old_type; - return ret; + /* delay a bit to give rfkill time to run */ + msleep(5); } - /* delay a bit to give rfkill time to run */ - msleep(5); - ret = iwlagn_alive_notify(priv); if (ret) { IWL_WARN(priv, @@ -685,7 +551,7 @@ int iwlagn_run_init_ucode(struct iwl_priv *priv) if (!priv->ucode_init.code.len) return 0; - if (priv->ucode_type != UCODE_SUBTYPE_NONE_LOADED) + if (priv->ucode_type != IWL_UCODE_NONE) return 0; iwlagn_init_notification_wait(priv, &calib_wait, @@ -694,7 +560,7 @@ int iwlagn_run_init_ucode(struct iwl_priv *priv) /* Will also start the device */ ret = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_init, - UCODE_SUBTYPE_INIT, -1); + IWL_UCODE_INIT); if (ret) goto error; @@ -714,6 +580,6 @@ int iwlagn_run_init_ucode(struct iwl_priv *priv) iwlagn_remove_notification(priv, &calib_wait); out: /* Whatever happened, stop the device */ - iwlagn_stop_device(priv); + trans_stop_device(&priv->trans); return ret; } diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 8e1942ebd9a..b0ae4de7f08 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -26,14 +26,9 @@ * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> -#include <linux/pci.h> -#include <linux/pci-aspm.h> #include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/delay.h> @@ -49,8 +44,6 @@ #include <asm/div64.h> -#define DRV_NAME "iwlagn" - #include "iwl-eeprom.h" #include "iwl-dev.h" #include "iwl-core.h" @@ -59,7 +52,8 @@ #include "iwl-sta.h" #include "iwl-agn-calib.h" #include "iwl-agn.h" - +#include "iwl-bus.h" +#include "iwl-trans.h" /****************************************************************************** * @@ -93,12 +87,10 @@ void iwl_update_chain_flags(struct iwl_priv *priv) { struct iwl_rxon_context *ctx; - if (priv->cfg->ops->hcmd->set_rxon_chain) { - for_each_context(priv, ctx) { - priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); - if (ctx->active.rx_chain != ctx->staging.rx_chain) - iwlcore_commit_rxon(priv, ctx); - } + for_each_context(priv, ctx) { + iwlagn_set_rxon_chain(priv, ctx); + if (ctx->active.rx_chain != ctx->staging.rx_chain) + iwlagn_commit_rxon(priv, ctx); } } @@ -134,7 +126,9 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv) struct iwl_tx_beacon_cmd *tx_beacon_cmd; struct iwl_host_cmd cmd = { .id = REPLY_TX_BEACON, + .flags = CMD_SYNC, }; + struct ieee80211_tx_info *info; u32 frame_size; u32 rate_flags; u32 rate; @@ -175,14 +169,31 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv) frame_size); /* Set up packet rate and flags */ - rate = iwl_rate_get_lowest_plcp(priv, priv->beacon_ctx); + info = IEEE80211_SKB_CB(priv->beacon_skb); + + /* + * Let's set up the rate at least somewhat correctly; + * it will currently not actually be used by the uCode, + * it uses the broadcast station's rate instead. 
+ */ + if (info->control.rates[0].idx < 0 || + info->control.rates[0].flags & IEEE80211_TX_RC_MCS) + rate = 0; + else + rate = info->control.rates[0].idx; + priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, priv->hw_params.valid_tx_ant); rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant); - if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE)) + + /* In mac80211, rates for 5 GHz start at 0 */ + if (info->band == IEEE80211_BAND_5GHZ) + rate += IWL_FIRST_OFDM_RATE; + else if (rate >= IWL_FIRST_CCK_RATE && rate <= IWL_LAST_CCK_RATE) rate_flags |= RATE_MCS_CCK_MSK; - tx_beacon_cmd->tx.rate_n_flags = iwl_hw_set_rate_n_flags(rate, - rate_flags); + + tx_beacon_cmd->tx.rate_n_flags = + iwl_hw_set_rate_n_flags(rate, rate_flags); /* Submit command */ cmd.len[0] = sizeof(*tx_beacon_cmd); @@ -192,7 +203,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv) cmd.data[1] = priv->beacon_skb->data; cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY; - return iwl_send_cmd_sync(priv, &cmd); + return trans_send_cmd(&priv->trans, &cmd); } static void iwl_bg_beacon_update(struct work_struct *work) @@ -245,7 +256,7 @@ static void iwl_bg_bt_runtime_config(struct work_struct *work) /* dont send host command if rf-kill is on */ if (!iwl_is_ready_rf(priv)) return; - priv->cfg->ops->hcmd->send_bt_config(priv); + iwlagn_send_advance_bt_config(priv); } static void iwl_bg_bt_full_concurrency(struct work_struct *work) @@ -272,12 +283,11 @@ static void iwl_bg_bt_full_concurrency(struct work_struct *work) * to avoid 3-wire collisions */ for_each_context(priv, ctx) { - if (priv->cfg->ops->hcmd->set_rxon_chain) - priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); - iwlcore_commit_rxon(priv, ctx); + iwlagn_set_rxon_chain(priv, ctx); + iwlagn_commit_rxon(priv, ctx); } - priv->cfg->ops->hcmd->send_bt_config(priv); + iwlagn_send_advance_bt_config(priv); out: mutex_unlock(&priv->mutex); } @@ -362,7 +372,7 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv) u32 next_entry; /* index of next entry to be written by uCode */ base = priv->device_pointers.error_event_table; - if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { + if (iwlagn_hw_valid_rtc_data_addr(base)) { capacity = iwl_read_targ_mem(priv, base); num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32))); mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32))); @@ -440,383 +450,8 @@ static void iwl_bg_tx_flush(struct work_struct *work) if (!iwl_is_ready_rf(priv)) return; - if (priv->cfg->ops->lib->txfifo_flush) { - IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n"); - iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL); - } -} - -/** - * iwl_rx_handle - Main entry function for receiving responses from uCode - * - * Uses the priv->rx_handlers callback function array to invoke - * the appropriate handlers, including command responses, - * frame-received notifications, and other notifications. - */ -static void iwl_rx_handle(struct iwl_priv *priv) -{ - struct iwl_rx_mem_buffer *rxb; - struct iwl_rx_packet *pkt; - struct iwl_rx_queue *rxq = &priv->rxq; - u32 r, i; - int reclaim; - unsigned long flags; - u8 fill_rx = 0; - u32 count = 8; - int total_empty; - - /* uCode's read index (stored in shared DRAM) indicates the last Rx - * buffer that the driver may process (last buffer filled by ucode). 
*/ - r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF; - i = rxq->read; - - /* Rx interrupt, but nothing sent from uCode */ - if (i == r) - IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i); - - /* calculate total frames need to be restock after handling RX */ - total_empty = r - rxq->write_actual; - if (total_empty < 0) - total_empty += RX_QUEUE_SIZE; - - if (total_empty > (RX_QUEUE_SIZE / 2)) - fill_rx = 1; - - while (i != r) { - int len; - - rxb = rxq->queue[i]; - - /* If an RXB doesn't have a Rx queue slot associated with it, - * then a bug has been introduced in the queue refilling - * routines -- catch it here */ - if (WARN_ON(rxb == NULL)) { - i = (i + 1) & RX_QUEUE_MASK; - continue; - } - - rxq->queue[i] = NULL; - - pci_unmap_page(priv->pci_dev, rxb->page_dma, - PAGE_SIZE << priv->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); - pkt = rxb_addr(rxb); - - len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; - len += sizeof(u32); /* account for status word */ - trace_iwlwifi_dev_rx(priv, pkt, len); - - /* Reclaim a command buffer only if this packet is a response - * to a (driver-originated) command. - * If the packet (e.g. Rx frame) originated from uCode, - * there is no command buffer to reclaim. - * Ucode should set SEQ_RX_FRAME bit if ucode-originated, - * but apparently a few don't get set; catch them here. */ - reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && - (pkt->hdr.cmd != REPLY_RX_PHY_CMD) && - (pkt->hdr.cmd != REPLY_RX) && - (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) && - (pkt->hdr.cmd != REPLY_COMPRESSED_BA) && - (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && - (pkt->hdr.cmd != REPLY_TX); - - /* - * Do the notification wait before RX handlers so - * even if the RX handler consumes the RXB we have - * access to it in the notification wait entry. - */ - if (!list_empty(&priv->_agn.notif_waits)) { - struct iwl_notification_wait *w; - - spin_lock(&priv->_agn.notif_wait_lock); - list_for_each_entry(w, &priv->_agn.notif_waits, list) { - if (w->cmd == pkt->hdr.cmd) { - w->triggered = true; - if (w->fn) - w->fn(priv, pkt, w->fn_data); - } - } - spin_unlock(&priv->_agn.notif_wait_lock); - - wake_up_all(&priv->_agn.notif_waitq); - } - if (priv->pre_rx_handler) - priv->pre_rx_handler(priv, rxb); - - /* Based on type of command response or notification, - * handle those that need handling via function in - * rx_handlers table. See iwl_setup_rx_handlers() */ - if (priv->rx_handlers[pkt->hdr.cmd]) { - IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, - i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); - priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; - priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); - } else { - /* No handling needed */ - IWL_DEBUG_RX(priv, - "r %d i %d No handler needed for %s, 0x%02x\n", - r, i, get_cmd_string(pkt->hdr.cmd), - pkt->hdr.cmd); - } - - /* - * XXX: After here, we should always check rxb->page - * against NULL before touching it or its virtual - * memory (pkt). Because some rx_handler might have - * already taken or freed the pages. - */ - - if (reclaim) { - /* Invoke any callbacks, transfer the buffer to caller, - * and fire off the (possibly) blocking iwl_send_cmd() - * as we reclaim the driver command queue */ - if (rxb->page) - iwl_tx_cmd_complete(priv, rxb); - else - IWL_WARN(priv, "Claim null rxb?\n"); - } - - /* Reuse the page if possible. For notification packets and - * SKBs that fail to Rx correctly, add them back into the - * rx_free list for reuse later. 
*/ - spin_lock_irqsave(&rxq->lock, flags); - if (rxb->page != NULL) { - rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page, - 0, PAGE_SIZE << priv->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); - list_add_tail(&rxb->list, &rxq->rx_free); - rxq->free_count++; - } else - list_add_tail(&rxb->list, &rxq->rx_used); - - spin_unlock_irqrestore(&rxq->lock, flags); - - i = (i + 1) & RX_QUEUE_MASK; - /* If there are a lot of unused frames, - * restock the Rx queue so ucode wont assert. */ - if (fill_rx) { - count++; - if (count >= 8) { - rxq->read = i; - iwlagn_rx_replenish_now(priv); - count = 0; - } - } - } - - /* Backtrack one entry */ - rxq->read = i; - if (fill_rx) - iwlagn_rx_replenish_now(priv); - else - iwlagn_rx_queue_restock(priv); -} - -/* tasklet for iwlagn interrupt */ -static void iwl_irq_tasklet(struct iwl_priv *priv) -{ - u32 inta = 0; - u32 handled = 0; - unsigned long flags; - u32 i; -#ifdef CONFIG_IWLWIFI_DEBUG - u32 inta_mask; -#endif - - spin_lock_irqsave(&priv->lock, flags); - - /* Ack/clear/reset pending uCode interrupts. - * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, - */ - /* There is a hardware bug in the interrupt mask function that some - * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if - * they are disabled in the CSR_INT_MASK register. Furthermore the - * ICT interrupt handling mechanism has another bug that might cause - * these unmasked interrupts fail to be detected. We workaround the - * hardware bugs here by ACKing all the possible interrupts so that - * interrupt coalescing can still be achieved. - */ - iwl_write32(priv, CSR_INT, priv->_agn.inta | ~priv->inta_mask); - - inta = priv->_agn.inta; - -#ifdef CONFIG_IWLWIFI_DEBUG - if (iwl_get_debug_level(priv) & IWL_DL_ISR) { - /* just for debug */ - inta_mask = iwl_read32(priv, CSR_INT_MASK); - IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x\n ", - inta, inta_mask); - } -#endif - - spin_unlock_irqrestore(&priv->lock, flags); - - /* saved interrupt in inta variable now we can reset priv->_agn.inta */ - priv->_agn.inta = 0; - - /* Now service all interrupt bits discovered above. */ - if (inta & CSR_INT_BIT_HW_ERR) { - IWL_ERR(priv, "Hardware error detected. Restarting.\n"); - - /* Tell the device to stop sending interrupts */ - iwl_disable_interrupts(priv); - - priv->isr_stats.hw++; - iwl_irq_handle_error(priv); - - handled |= CSR_INT_BIT_HW_ERR; - - return; - } - -#ifdef CONFIG_IWLWIFI_DEBUG - if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { - /* NIC fires this, but we don't use it, redundant with WAKEUP */ - if (inta & CSR_INT_BIT_SCD) { - IWL_DEBUG_ISR(priv, "Scheduler finished to transmit " - "the frame/frames.\n"); - priv->isr_stats.sch++; - } - - /* Alive notification via Rx interrupt will do the real work */ - if (inta & CSR_INT_BIT_ALIVE) { - IWL_DEBUG_ISR(priv, "Alive interrupt\n"); - priv->isr_stats.alive++; - } - } -#endif - /* Safely ignore these bits for debug checks below */ - inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); - - /* HW RF KILL switch toggled */ - if (inta & CSR_INT_BIT_RF_KILL) { - int hw_rf_kill = 0; - if (!(iwl_read32(priv, CSR_GP_CNTRL) & - CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) - hw_rf_kill = 1; - - IWL_WARN(priv, "RF_KILL bit toggled to %s.\n", - hw_rf_kill ? "disable radio" : "enable radio"); - - priv->isr_stats.rfkill++; - - /* driver only loads ucode once setting the interface up. - * the driver allows loading the ucode even if the radio - * is killed. Hence update the killswitch state here. 
The - * rfkill handler will care about restarting if needed. - */ - if (!test_bit(STATUS_ALIVE, &priv->status)) { - if (hw_rf_kill) - set_bit(STATUS_RF_KILL_HW, &priv->status); - else - clear_bit(STATUS_RF_KILL_HW, &priv->status); - wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill); - } - - handled |= CSR_INT_BIT_RF_KILL; - } - - /* Chip got too hot and stopped itself */ - if (inta & CSR_INT_BIT_CT_KILL) { - IWL_ERR(priv, "Microcode CT kill error detected.\n"); - priv->isr_stats.ctkill++; - handled |= CSR_INT_BIT_CT_KILL; - } - - /* Error detected by uCode */ - if (inta & CSR_INT_BIT_SW_ERR) { - IWL_ERR(priv, "Microcode SW error detected. " - " Restarting 0x%X.\n", inta); - priv->isr_stats.sw++; - iwl_irq_handle_error(priv); - handled |= CSR_INT_BIT_SW_ERR; - } - - /* uCode wakes up after power-down sleep */ - if (inta & CSR_INT_BIT_WAKEUP) { - IWL_DEBUG_ISR(priv, "Wakeup interrupt\n"); - iwl_rx_queue_update_write_ptr(priv, &priv->rxq); - for (i = 0; i < priv->hw_params.max_txq_num; i++) - iwl_txq_update_write_ptr(priv, &priv->txq[i]); - - priv->isr_stats.wakeup++; - - handled |= CSR_INT_BIT_WAKEUP; - } - - /* All uCode command responses, including Tx command responses, - * Rx "responses" (frame-received notification), and other - * notifications from uCode come through here*/ - if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX | - CSR_INT_BIT_RX_PERIODIC)) { - IWL_DEBUG_ISR(priv, "Rx interrupt\n"); - if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { - handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); - iwl_write32(priv, CSR_FH_INT_STATUS, - CSR_FH_INT_RX_MASK); - } - if (inta & CSR_INT_BIT_RX_PERIODIC) { - handled |= CSR_INT_BIT_RX_PERIODIC; - iwl_write32(priv, CSR_INT, CSR_INT_BIT_RX_PERIODIC); - } - /* Sending RX interrupt require many steps to be done in the - * the device: - * 1- write interrupt to current index in ICT table. - * 2- dma RX frame. - * 3- update RX shared data to indicate last write index. - * 4- send interrupt. - * This could lead to RX race, driver could receive RX interrupt - * but the shared data changes does not reflect this; - * periodic interrupt will detect any dangling Rx activity. - */ - - /* Disable periodic interrupt; we use it as just a one-shot. */ - iwl_write8(priv, CSR_INT_PERIODIC_REG, - CSR_INT_PERIODIC_DIS); - iwl_rx_handle(priv); - - /* - * Enable periodic interrupt in 8 msec only if we received - * real RX interrupt (instead of just periodic int), to catch - * any dangling Rx interrupt. If it was just the periodic - * interrupt, there was no dangling Rx activity, and no need - * to extend the periodic interrupt; one-shot is enough. 
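/*
 * Illustrative sketch, not part of the patch: the one-shot use of the
 * periodic RX interrupt that the comment above describes.  It is disabled
 * before handling RX and re-armed (to fire roughly 8 ms later) only when
 * the cause was a real RX interrupt, because only then can the ICT/DMA
 * race leave RX work dangling.  Helper name is hypothetical.
 */
static void rx_periodic_rearm(struct iwl_priv *priv, u32 inta)
{
        if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
                iwl_write8(priv, CSR_INT_PERIODIC_REG,
                           CSR_INT_PERIODIC_ENA);
        /* otherwise only the periodic interrupt fired: stay one-shot */
}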
- */ - if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) - iwl_write8(priv, CSR_INT_PERIODIC_REG, - CSR_INT_PERIODIC_ENA); - - priv->isr_stats.rx++; - } - - /* This "Tx" DMA channel is used only for loading uCode */ - if (inta & CSR_INT_BIT_FH_TX) { - iwl_write32(priv, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK); - IWL_DEBUG_ISR(priv, "uCode load interrupt\n"); - priv->isr_stats.tx++; - handled |= CSR_INT_BIT_FH_TX; - /* Wake up uCode load routine, now that load is complete */ - priv->ucode_write_complete = 1; - wake_up_interruptible(&priv->wait_command_queue); - } - - if (inta & ~handled) { - IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled); - priv->isr_stats.unhandled++; - } - - if (inta & ~(priv->inta_mask)) { - IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n", - inta & ~priv->inta_mask); - } - - /* Re-enable all interrupts */ - /* only Re-enable if disabled by irq */ - if (test_bit(STATUS_INT_ENABLED, &priv->status)) - iwl_enable_interrupts(priv); - /* Re-enable RF_KILL if it occurred */ - else if (handled & CSR_INT_BIT_RF_KILL) - iwl_enable_rfkill_int(priv); + IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n"); + iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL); } /***************************************************************************** @@ -939,22 +574,29 @@ static struct attribute_group iwl_attribute_group = { * ******************************************************************************/ -static void iwl_free_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc) +static void iwl_free_fw_desc(struct iwl_priv *priv, struct fw_desc *desc) { if (desc->v_addr) - dma_free_coherent(&pci_dev->dev, desc->len, + dma_free_coherent(priv->bus->dev, desc->len, desc->v_addr, desc->p_addr); desc->v_addr = NULL; desc->len = 0; } -static void iwl_free_fw_img(struct pci_dev *pci_dev, struct fw_img *img) +static void iwl_free_fw_img(struct iwl_priv *priv, struct fw_img *img) +{ + iwl_free_fw_desc(priv, &img->code); + iwl_free_fw_desc(priv, &img->data); +} + +static void iwl_dealloc_ucode(struct iwl_priv *priv) { - iwl_free_fw_desc(pci_dev, &img->code); - iwl_free_fw_desc(pci_dev, &img->data); + iwl_free_fw_img(priv, &priv->ucode_rt); + iwl_free_fw_img(priv, &priv->ucode_init); + iwl_free_fw_img(priv, &priv->ucode_wowlan); } -static int iwl_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc, +static int iwl_alloc_fw_desc(struct iwl_priv *priv, struct fw_desc *desc, const void *data, size_t len) { if (!len) { @@ -962,21 +604,16 @@ static int iwl_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc, return -EINVAL; } - desc->v_addr = dma_alloc_coherent(&pci_dev->dev, len, + desc->v_addr = dma_alloc_coherent(priv->bus->dev, len, &desc->p_addr, GFP_KERNEL); if (!desc->v_addr) return -ENOMEM; + desc->len = len; memcpy(desc->v_addr, data, len); return 0; } -static void iwl_dealloc_ucode_pci(struct iwl_priv *priv) -{ - iwl_free_fw_img(priv->pci_dev, &priv->ucode_rt); - iwl_free_fw_img(priv->pci_dev, &priv->ucode_init); -} - struct iwlagn_ucode_capabilities { u32 max_probe_length; u32 standard_phy_calibration_size; @@ -1021,13 +658,14 @@ static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first) priv->firmware_name); return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name, - &priv->pci_dev->dev, GFP_KERNEL, priv, - iwl_ucode_callback); + priv->bus->dev, + GFP_KERNEL, priv, iwl_ucode_callback); } struct iwlagn_firmware_pieces { - const void *inst, *data, *init, *init_data; - size_t inst_size, data_size, init_size, init_data_size; + 
const void *inst, *data, *init, *init_data, *wowlan_inst, *wowlan_data; + size_t inst_size, data_size, init_size, init_data_size, + wowlan_inst_size, wowlan_data_size; u32 build; @@ -1266,6 +904,14 @@ static int iwlagn_load_firmware(struct iwl_priv *priv, goto invalid_tlv_len; priv->enhance_sensitivity_table = true; break; + case IWL_UCODE_TLV_WOWLAN_INST: + pieces->wowlan_inst = tlv_data; + pieces->wowlan_inst_size = tlv_len; + break; + case IWL_UCODE_TLV_WOWLAN_DATA: + pieces->wowlan_data = tlv_data; + pieces->wowlan_data_size = tlv_len; + break; case IWL_UCODE_TLV_PHY_CALIBRATION_SIZE: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; @@ -1443,23 +1089,35 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) /* Runtime instructions and 2 copies of data: * 1) unmodified from disk * 2) backup cache for save/restore during power-downs */ - if (iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_rt.code, + if (iwl_alloc_fw_desc(priv, &priv->ucode_rt.code, pieces.inst, pieces.inst_size)) goto err_pci_alloc; - if (iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_rt.data, + if (iwl_alloc_fw_desc(priv, &priv->ucode_rt.data, pieces.data, pieces.data_size)) goto err_pci_alloc; /* Initialization instructions and data */ if (pieces.init_size && pieces.init_data_size) { - if (iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init.code, + if (iwl_alloc_fw_desc(priv, &priv->ucode_init.code, pieces.init, pieces.init_size)) goto err_pci_alloc; - if (iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init.data, + if (iwl_alloc_fw_desc(priv, &priv->ucode_init.data, pieces.init_data, pieces.init_data_size)) goto err_pci_alloc; } + /* WoWLAN instructions and data */ + if (pieces.wowlan_inst_size && pieces.wowlan_data_size) { + if (iwl_alloc_fw_desc(priv, &priv->ucode_wowlan.code, + pieces.wowlan_inst, + pieces.wowlan_inst_size)) + goto err_pci_alloc; + if (iwl_alloc_fw_desc(priv, &priv->ucode_wowlan.data, + pieces.wowlan_data, + pieces.wowlan_data_size)) + goto err_pci_alloc; + } + /* Now that we can no longer fail, copy information */ /* @@ -1467,25 +1125,26 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) * for each event, which is of mode 1 (including timestamp) for all * new microcodes that include this information. 
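/*
 * Illustrative sketch, not part of the patch: where the (size - 16) / 12
 * conversion used just below comes from, assuming the TLV reports the log
 * size in bytes.  The event log begins with a four-word header (the same
 * words iwl_continuous_event_trace() reads: capacity, mode, wrap count,
 * and presumably the next write index), and each mode-1 entry carries an
 * event id, a timestamp and one data word.  Struct names are hypothetical.
 */
struct evtlog_header {                  /* 16 bytes */
        u32 capacity;                   /* maximum number of entries */
        u32 mode;                       /* 1 = entries include a timestamp */
        u32 num_wraps;                  /* how often the log wrapped */
        u32 next_entry;                 /* index uCode writes next */
};

struct evtlog_entry_mode1 {             /* 12 bytes */
        u32 event_id;
        u32 timestamp;
        u32 data;
};

/* number of entries = (log size in bytes - sizeof(header)) / sizeof(entry) */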
*/ - priv->_agn.init_evtlog_ptr = pieces.init_evtlog_ptr; + priv->init_evtlog_ptr = pieces.init_evtlog_ptr; if (pieces.init_evtlog_size) - priv->_agn.init_evtlog_size = (pieces.init_evtlog_size - 16)/12; + priv->init_evtlog_size = (pieces.init_evtlog_size - 16)/12; else - priv->_agn.init_evtlog_size = + priv->init_evtlog_size = priv->cfg->base_params->max_event_log_size; - priv->_agn.init_errlog_ptr = pieces.init_errlog_ptr; - priv->_agn.inst_evtlog_ptr = pieces.inst_evtlog_ptr; + priv->init_errlog_ptr = pieces.init_errlog_ptr; + priv->inst_evtlog_ptr = pieces.inst_evtlog_ptr; if (pieces.inst_evtlog_size) - priv->_agn.inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12; + priv->inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12; else - priv->_agn.inst_evtlog_size = + priv->inst_evtlog_size = priv->cfg->base_params->max_event_log_size; - priv->_agn.inst_errlog_ptr = pieces.inst_errlog_ptr; + priv->inst_errlog_ptr = pieces.inst_errlog_ptr; priv->new_scan_threshold_behaviour = !!(ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWSCAN); - if (ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN) { + if ((priv->cfg->sku & EEPROM_SKU_CAP_IPAN_ENABLE) && + (ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN)) { priv->valid_contexts |= BIT(IWL_RXON_CTX_PAN); priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN; } else @@ -1505,9 +1164,9 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) ucode_capa.standard_phy_calibration_size = IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE; - priv->_agn.phy_calib_chain_noise_reset_cmd = + priv->phy_calib_chain_noise_reset_cmd = ucode_capa.standard_phy_calibration_size; - priv->_agn.phy_calib_chain_noise_gain_cmd = + priv->phy_calib_chain_noise_gain_cmd = ucode_capa.standard_phy_calibration_size + 1; /************************************************** @@ -1523,7 +1182,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) if (err) IWL_ERR(priv, "failed to create debugfs files. 
Ignoring error: %d\n", err); - err = sysfs_create_group(&priv->pci_dev->dev.kobj, + err = sysfs_create_group(&(priv->bus->dev->kobj), &iwl_attribute_group); if (err) { IWL_ERR(priv, "failed to create sysfs device attributes\n"); @@ -1532,7 +1191,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) /* We have our copies now, allow OS release its copies */ release_firmware(ucode_raw); - complete(&priv->_agn.firmware_loading_complete); + complete(&priv->firmware_loading_complete); return; try_again: @@ -1544,14 +1203,14 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) err_pci_alloc: IWL_ERR(priv, "failed to allocate pci memory\n"); - iwl_dealloc_ucode_pci(priv); + iwl_dealloc_ucode(priv); out_unbind: - complete(&priv->_agn.firmware_loading_complete); - device_release_driver(&priv->pci_dev->dev); + complete(&priv->firmware_loading_complete); + device_release_driver(priv->bus->dev); release_firmware(ucode_raw); } -static const char *desc_lookup_text[] = { +static const char * const desc_lookup_text[] = { "OK", "FAIL", "BAD_PARAM", @@ -1575,7 +1234,7 @@ static const char *desc_lookup_text[] = { "NMI_INTERRUPT_DATA_ACTION_PT", "NMI_TRM_HW_ER", "NMI_INTERRUPT_TRM", - "NMI_INTERRUPT_BREAK_POINT" + "NMI_INTERRUPT_BREAK_POINT", "DEBUG_0", "DEBUG_1", "DEBUG_2", @@ -1626,19 +1285,19 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv) struct iwl_error_event_table table; base = priv->device_pointers.error_event_table; - if (priv->ucode_type == UCODE_SUBTYPE_INIT) { + if (priv->ucode_type == IWL_UCODE_INIT) { if (!base) - base = priv->_agn.init_errlog_ptr; + base = priv->init_errlog_ptr; } else { if (!base) - base = priv->_agn.inst_errlog_ptr; + base = priv->inst_errlog_ptr; } - if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { + if (!iwlagn_hw_valid_rtc_data_addr(base)) { IWL_ERR(priv, "Not valid error log pointer 0x%08X for %s uCode\n", base, - (priv->ucode_type == UCODE_SUBTYPE_INIT) + (priv->ucode_type == IWL_UCODE_INIT) ? "Init" : "RT"); return; } @@ -1702,12 +1361,12 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx, return pos; base = priv->device_pointers.log_event_table; - if (priv->ucode_type == UCODE_SUBTYPE_INIT) { + if (priv->ucode_type == IWL_UCODE_INIT) { if (!base) - base = priv->_agn.init_evtlog_ptr; + base = priv->init_evtlog_ptr; } else { if (!base) - base = priv->_agn.inst_evtlog_ptr; + base = priv->inst_evtlog_ptr; } if (mode == 0) @@ -1815,21 +1474,21 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log, size_t bufsz = 0; base = priv->device_pointers.log_event_table; - if (priv->ucode_type == UCODE_SUBTYPE_INIT) { - logsize = priv->_agn.init_evtlog_size; + if (priv->ucode_type == IWL_UCODE_INIT) { + logsize = priv->init_evtlog_size; if (!base) - base = priv->_agn.init_evtlog_ptr; + base = priv->init_evtlog_ptr; } else { - logsize = priv->_agn.inst_evtlog_size; + logsize = priv->inst_evtlog_size; if (!base) - base = priv->_agn.inst_evtlog_ptr; + base = priv->inst_evtlog_ptr; } - if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { + if (!iwlagn_hw_valid_rtc_data_addr(base)) { IWL_ERR(priv, "Invalid event log pointer 0x%08X for %s uCode\n", base, - (priv->ucode_type == UCODE_SUBTYPE_INIT) + (priv->ucode_type == IWL_UCODE_INIT) ? 
"Init" : "RT"); return -EINVAL; } @@ -1928,8 +1587,9 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv) adv_cmd.critical_temperature_exit = cpu_to_le32(priv->hw_params.ct_kill_exit_threshold); - ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD, - sizeof(adv_cmd), &adv_cmd); + ret = trans_send_cmd_pdu(&priv->trans, + REPLY_CT_KILL_CONFIG_CMD, + CMD_SYNC, sizeof(adv_cmd), &adv_cmd); if (ret) IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n"); else @@ -1943,8 +1603,9 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv) cmd.critical_temperature_R = cpu_to_le32(priv->hw_params.ct_kill_threshold); - ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD, - sizeof(cmd), &cmd); + ret = trans_send_cmd_pdu(&priv->trans, + REPLY_CT_KILL_CONFIG_CMD, + CMD_SYNC, sizeof(cmd), &cmd); if (ret) IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n"); else @@ -1968,10 +1629,29 @@ static int iwlagn_send_calib_cfg_rt(struct iwl_priv *priv, u32 cfg) calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL; calib_cfg_cmd.ucd_calib_cfg.once.start = cpu_to_le32(cfg); - return iwl_send_cmd(priv, &cmd); + return trans_send_cmd(&priv->trans, &cmd); } +static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant) +{ + struct iwl_tx_ant_config_cmd tx_ant_cmd = { + .valid = cpu_to_le32(valid_tx_ant), + }; + + if (IWL_UCODE_API(priv->ucode_ver) > 1) { + IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant); + return trans_send_cmd_pdu(&priv->trans, + TX_ANT_CONFIGURATION_CMD, + CMD_SYNC, + sizeof(struct iwl_tx_ant_config_cmd), + &tx_ant_cmd); + } else { + IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n"); + return -EOPNOTSUPP; + } +} + /** * iwl_alive_start - called after REPLY_ALIVE notification received * from protocol/runtime uCode (initialization uCode's @@ -1982,6 +1662,7 @@ int iwl_alive_start(struct iwl_priv *priv) int ret = 0; struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + /*TODO: this should go to the transport layer */ iwl_reset_ict(priv); IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); @@ -1999,11 +1680,18 @@ int iwl_alive_start(struct iwl_priv *priv) if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist) { /* Configure Bluetooth device coexistence support */ + if (priv->cfg->bt_params->bt_sco_disable) + priv->bt_enable_pspoll = false; + else + priv->bt_enable_pspoll = true; + priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK; priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT; priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT; - priv->cfg->ops->hcmd->send_bt_config(priv); + iwlagn_send_advance_bt_config(priv); priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS; + priv->cur_rssi_ctx = NULL; + iwlagn_send_prio_tbl(priv); /* FIXME: w/a to force change uCode BT state machine */ @@ -2015,7 +1703,13 @@ int iwl_alive_start(struct iwl_priv *priv) BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); if (ret) return ret; + } else { + /* + * default is 2-wire BT coexexistence support + */ + iwl_send_bt_config(priv); } + if (priv->hw_params.calib_rt_cfg) iwlagn_send_calib_cfg_rt(priv, priv->hw_params.calib_rt_cfg); @@ -2024,10 +1718,9 @@ int iwl_alive_start(struct iwl_priv *priv) priv->active_rate = IWL_RATES_MASK; /* Configure Tx antenna selection based on H/W config */ - if (priv->cfg->ops->hcmd->set_tx_ant) - priv->cfg->ops->hcmd->set_tx_ant(priv, priv->cfg->valid_tx_ant); + iwlagn_send_tx_ant_config(priv, priv->cfg->valid_tx_ant); - if (iwl_is_associated_ctx(ctx)) { + if (iwl_is_associated_ctx(ctx) && !priv->wowlan) { struct iwl_rxon_cmd 
*active_rxon = (struct iwl_rxon_cmd *)&ctx->active; /* apply any changes in staging */ @@ -2039,24 +1732,18 @@ int iwl_alive_start(struct iwl_priv *priv) for_each_context(priv, tmp) iwl_connection_init_rx_config(priv, tmp); - if (priv->cfg->ops->hcmd->set_rxon_chain) - priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); + iwlagn_set_rxon_chain(priv, ctx); } - if (!priv->cfg->bt_params || (priv->cfg->bt_params && - !priv->cfg->bt_params->advanced_bt_coexist)) { - /* - * default is 2-wire BT coexexistence support - */ - priv->cfg->ops->hcmd->send_bt_config(priv); + if (!priv->wowlan) { + /* WoWLAN ucode will not reply in the same way, skip it */ + iwl_reset_run_time_calib(priv); } - iwl_reset_run_time_calib(priv); - set_bit(STATUS_READY, &priv->status); /* Configure the adapter for unassociated operation */ - ret = iwlcore_commit_rxon(priv, ctx); + ret = iwlagn_commit_rxon(priv, ctx); if (ret) return ret; @@ -2090,6 +1777,8 @@ static void __iwl_down(struct iwl_priv *priv) /* reset BT coex data */ priv->bt_status = 0; + priv->cur_rssi_ctx = NULL; + priv->bt_is_sco = 0; if (priv->cfg->bt_params) priv->bt_traffic_load = priv->cfg->bt_params->bt_init_traffic_load; @@ -2116,7 +1805,7 @@ static void __iwl_down(struct iwl_priv *priv) test_bit(STATUS_EXIT_PENDING, &priv->status) << STATUS_EXIT_PENDING; - iwlagn_stop_device(priv); + trans_stop_device(&priv->trans); dev_kfree_skb(priv->beacon_skb); priv->beacon_skb = NULL; @@ -2131,55 +1820,6 @@ static void iwl_down(struct iwl_priv *priv) iwl_cancel_deferred_work(priv); } -#define HW_READY_TIMEOUT (50) - -/* Note: returns poll_bit return value, which is >= 0 if success */ -static int iwl_set_hw_ready(struct iwl_priv *priv) -{ - int ret; - - iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); - - /* See if we got it */ - ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, - CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, - HW_READY_TIMEOUT); - - IWL_DEBUG_INFO(priv, "hardware%s ready\n", ret < 0 ? " not" : ""); - return ret; -} - -/* Note: returns standard 0/-ERROR code */ -int iwl_prepare_card_hw(struct iwl_priv *priv) -{ - int ret; - - IWL_DEBUG_INFO(priv, "iwl_prepare_card_hw enter\n"); - - ret = iwl_set_hw_ready(priv); - if (ret >= 0) - return 0; - - /* If HW is not ready, prepare the conditions to check again */ - iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_PREPARE); - - ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, - ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, - CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000); - - if (ret < 0) - return ret; - - /* HW should be ready by now, check again. 
*/ - ret = iwl_set_hw_ready(priv); - if (ret >= 0) - return 0; - return ret; -} - #define MAX_HW_RESTARTS 5 static int __iwl_up(struct iwl_priv *priv) @@ -2210,8 +1850,7 @@ static int __iwl_up(struct iwl_priv *priv) ret = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_rt, - UCODE_SUBTYPE_REGULAR, - UCODE_SUBTYPE_REGULAR_NEW); + IWL_UCODE_REGULAR); if (ret) { IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret); goto error; @@ -2266,6 +1905,7 @@ static void iwlagn_prepare_restart(struct iwl_priv *priv) u8 bt_ci_compliance; u8 bt_load; u8 bt_status; + bool bt_is_sco; lockdep_assert_held(&priv->mutex); @@ -2286,6 +1926,7 @@ static void iwlagn_prepare_restart(struct iwl_priv *priv) bt_ci_compliance = priv->bt_ci_compliance; bt_load = priv->bt_traffic_load; bt_status = priv->bt_status; + bt_is_sco = priv->bt_is_sco; __iwl_down(priv); @@ -2293,6 +1934,7 @@ static void iwlagn_prepare_restart(struct iwl_priv *priv) priv->bt_ci_compliance = bt_ci_compliance; priv->bt_traffic_load = bt_load; priv->bt_status = bt_status; + priv->bt_is_sco = bt_is_sco; } static void iwl_bg_restart(struct work_struct *data) @@ -2313,19 +1955,6 @@ static void iwl_bg_restart(struct work_struct *data) } } -static void iwl_bg_rx_replenish(struct work_struct *data) -{ - struct iwl_priv *priv = - container_of(data, struct iwl_priv, rx_replenish); - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - mutex_lock(&priv->mutex); - iwlagn_rx_replenish(priv); - mutex_unlock(&priv->mutex); -} - static int iwl_mac_offchannel_tx(struct ieee80211_hw *hw, struct sk_buff *skb, struct ieee80211_channel *chan, enum nl80211_channel_type channel_type, @@ -2360,7 +1989,7 @@ static int iwl_mac_offchannel_tx(struct ieee80211_hw *hw, struct sk_buff *skb, /* TODO: queue up if scanning? */ if (test_bit(STATUS_SCANNING, &priv->status) || - priv->_agn.offchan_tx_skb) { + priv->offchan_tx_skb) { ret = -EBUSY; goto out; } @@ -2374,14 +2003,14 @@ static int iwl_mac_offchannel_tx(struct ieee80211_hw *hw, struct sk_buff *skb, goto out; } - priv->_agn.offchan_tx_skb = skb; - priv->_agn.offchan_tx_timeout = wait; - priv->_agn.offchan_tx_chan = chan; + priv->offchan_tx_skb = skb; + priv->offchan_tx_timeout = wait; + priv->offchan_tx_chan = chan; ret = iwl_scan_initiate(priv, priv->contexts[IWL_RXON_CTX_PAN].vif, IWL_SCAN_OFFCH_TX, chan->band); if (ret) - priv->_agn.offchan_tx_skb = NULL; + priv->offchan_tx_skb = NULL; out: mutex_unlock(&priv->mutex); free: @@ -2398,12 +2027,12 @@ static int iwl_mac_offchannel_tx_cancel_wait(struct ieee80211_hw *hw) mutex_lock(&priv->mutex); - if (!priv->_agn.offchan_tx_skb) { + if (!priv->offchan_tx_skb) { ret = -EINVAL; goto unlock; } - priv->_agn.offchan_tx_skb = NULL; + priv->offchan_tx_skb = NULL; ret = iwl_scan_cancel_timeout(priv, 200); if (ret) @@ -2420,6 +2049,77 @@ unlock: * *****************************************************************************/ +static const struct ieee80211_iface_limit iwlagn_sta_ap_limits[] = { + { + .max = 1, + .types = BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_AP), + }, +}; + +static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = { + { + .max = 2, + .types = BIT(NL80211_IFTYPE_STATION), + }, +}; + +static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits[] = { + { + .max = 1, + .types = BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_GO) | + BIT(NL80211_IFTYPE_AP), + }, +}; + +static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits[] = { + { + .max = 2, + .types = 
BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_CLIENT), + }, +}; + +static const struct ieee80211_iface_combination +iwlagn_iface_combinations_dualmode[] = { + { .num_different_channels = 1, + .max_interfaces = 2, + .beacon_int_infra_match = true, + .limits = iwlagn_sta_ap_limits, + .n_limits = ARRAY_SIZE(iwlagn_sta_ap_limits), + }, + { .num_different_channels = 1, + .max_interfaces = 2, + .limits = iwlagn_2sta_limits, + .n_limits = ARRAY_SIZE(iwlagn_2sta_limits), + }, +}; + +static const struct ieee80211_iface_combination +iwlagn_iface_combinations_p2p[] = { + { .num_different_channels = 1, + .max_interfaces = 2, + .beacon_int_infra_match = true, + .limits = iwlagn_p2p_sta_go_limits, + .n_limits = ARRAY_SIZE(iwlagn_p2p_sta_go_limits), + }, + { .num_different_channels = 1, + .max_interfaces = 2, + .limits = iwlagn_p2p_2sta_limits, + .n_limits = ARRAY_SIZE(iwlagn_p2p_2sta_limits), + }, +}; + /* * Not a mac80211 entry point function, but it fits in with all the * other mac80211 functions grouped here. @@ -2445,7 +2145,7 @@ static int iwl_mac_setup_register(struct iwl_priv *priv, hw->flags |= IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS; - if (priv->cfg->sku & IWL_SKU_N) + if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE) hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | IEEE80211_HW_SUPPORTS_STATIC_SMPS; @@ -2460,17 +2160,45 @@ static int iwl_mac_setup_register(struct iwl_priv *priv, hw->wiphy->interface_modes |= ctx->exclusive_interface_modes; } + BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); + + if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) { + hw->wiphy->iface_combinations = iwlagn_iface_combinations_p2p; + hw->wiphy->n_iface_combinations = + ARRAY_SIZE(iwlagn_iface_combinations_p2p); + } else if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) { + hw->wiphy->iface_combinations = iwlagn_iface_combinations_dualmode; + hw->wiphy->n_iface_combinations = + ARRAY_SIZE(iwlagn_iface_combinations_dualmode); + } + hw->wiphy->max_remain_on_channel_duration = 1000; hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS | WIPHY_FLAG_IBSS_RSN; - /* - * For now, disable PS by default because it affects - * RX performance significantly. 
- */ - hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; + if (priv->ucode_wowlan.code.len && device_can_wakeup(priv->bus->dev)) { + hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | + WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_EAP_IDENTITY_REQ | + WIPHY_WOWLAN_RFKILL_RELEASE; + if (!iwlagn_mod_params.sw_crypto) + hw->wiphy->wowlan.flags |= + WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | + WIPHY_WOWLAN_GTK_REKEY_FAILURE; + + hw->wiphy->wowlan.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS; + hw->wiphy->wowlan.pattern_min_len = + IWLAGN_WOWLAN_MIN_PATTERN_LEN; + hw->wiphy->wowlan.pattern_max_len = + IWLAGN_WOWLAN_MAX_PATTERN_LEN; + } + + if (iwlagn_mod_params.power_save) + hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; + else + hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; /* we create the 802.11 header and a zero-length SSID element */ @@ -2551,6 +2279,471 @@ static void iwlagn_mac_stop(struct ieee80211_hw *hw) IWL_DEBUG_MAC80211(priv, "leave\n"); } +#ifdef CONFIG_PM +static int iwlagn_send_patterns(struct iwl_priv *priv, + struct cfg80211_wowlan *wowlan) +{ + struct iwlagn_wowlan_patterns_cmd *pattern_cmd; + struct iwl_host_cmd cmd = { + .id = REPLY_WOWLAN_PATTERNS, + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + .flags = CMD_SYNC, + }; + int i, err; + + if (!wowlan->n_patterns) + return 0; + + cmd.len[0] = sizeof(*pattern_cmd) + + wowlan->n_patterns * sizeof(struct iwlagn_wowlan_pattern); + + pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL); + if (!pattern_cmd) + return -ENOMEM; + + pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns); + + for (i = 0; i < wowlan->n_patterns; i++) { + int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8); + + memcpy(&pattern_cmd->patterns[i].mask, + wowlan->patterns[i].mask, mask_len); + memcpy(&pattern_cmd->patterns[i].pattern, + wowlan->patterns[i].pattern, + wowlan->patterns[i].pattern_len); + pattern_cmd->patterns[i].mask_size = mask_len; + pattern_cmd->patterns[i].pattern_size = + wowlan->patterns[i].pattern_len; + } + + cmd.data[0] = pattern_cmd; + err = trans_send_cmd(&priv->trans, &cmd); + kfree(pattern_cmd); + return err; +} +#endif + +static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *data) +{ + struct iwl_priv *priv = hw->priv; + + if (iwlagn_mod_params.sw_crypto) + return; + + mutex_lock(&priv->mutex); + + if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif) + goto out; + + memcpy(priv->kek, data->kek, NL80211_KEK_LEN); + memcpy(priv->kck, data->kck, NL80211_KCK_LEN); + priv->replay_ctr = cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr)); + priv->have_rekey_data = true; + + out: + mutex_unlock(&priv->mutex); +} + +struct wowlan_key_data { + struct iwl_rxon_context *ctx; + struct iwlagn_wowlan_rsc_tsc_params_cmd *rsc_tsc; + struct iwlagn_wowlan_tkip_params_cmd *tkip; + const u8 *bssid; + bool error, use_rsc_tsc, use_tkip; +}; + +#ifdef CONFIG_PM +static void iwlagn_convert_p1k(u16 *p1k, __le16 *out) +{ + int i; + + for (i = 0; i < IWLAGN_P1K_SIZE; i++) + out[i] = cpu_to_le16(p1k[i]); +} + +static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *_data) +{ + struct iwl_priv *priv = hw->priv; + struct wowlan_key_data *data = _data; + struct iwl_rxon_context *ctx = data->ctx; + struct aes_sc *aes_sc, *aes_tx_sc = NULL; + struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL; + struct iwlagn_p1k_cache *rx_p1ks; + u8 *rx_mic_key; + struct 
ieee80211_key_seq seq; + u32 cur_rx_iv32 = 0; + u16 p1k[IWLAGN_P1K_SIZE]; + int ret, i; + + mutex_lock(&priv->mutex); + + if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 || + key->cipher == WLAN_CIPHER_SUITE_WEP104) && + !sta && !ctx->key_mapping_keys) + ret = iwl_set_default_wep_key(priv, ctx, key); + else + ret = iwl_set_dynamic_key(priv, ctx, key, sta); + + if (ret) { + IWL_ERR(priv, "Error setting key during suspend!\n"); + data->error = true; + } + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_TKIP: + if (sta) { + tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc; + tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc; + + rx_p1ks = data->tkip->rx_uni; + + ieee80211_get_key_tx_seq(key, &seq); + tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16); + tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32); + + ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k); + iwlagn_convert_p1k(p1k, data->tkip->tx.p1k); + + memcpy(data->tkip->mic_keys.tx, + &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], + IWLAGN_MIC_KEY_SIZE); + + rx_mic_key = data->tkip->mic_keys.rx_unicast; + } else { + tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc; + rx_p1ks = data->tkip->rx_multi; + rx_mic_key = data->tkip->mic_keys.rx_mcast; + } + + /* + * For non-QoS this relies on the fact that both the uCode and + * mac80211 use TID 0 (as they need to to avoid replay attacks) + * for checking the IV in the frames. + */ + for (i = 0; i < IWLAGN_NUM_RSC; i++) { + ieee80211_get_key_rx_seq(key, i, &seq); + tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16); + tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32); + /* wrapping isn't allowed, AP must rekey */ + if (seq.tkip.iv32 > cur_rx_iv32) + cur_rx_iv32 = seq.tkip.iv32; + } + + ieee80211_get_tkip_rx_p1k(key, data->bssid, cur_rx_iv32, p1k); + iwlagn_convert_p1k(p1k, rx_p1ks[0].p1k); + ieee80211_get_tkip_rx_p1k(key, data->bssid, + cur_rx_iv32 + 1, p1k); + iwlagn_convert_p1k(p1k, rx_p1ks[1].p1k); + + memcpy(rx_mic_key, + &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], + IWLAGN_MIC_KEY_SIZE); + + data->use_tkip = true; + data->use_rsc_tsc = true; + break; + case WLAN_CIPHER_SUITE_CCMP: + if (sta) { + u8 *pn = seq.ccmp.pn; + + aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc; + aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc; + + ieee80211_get_key_tx_seq(key, &seq); + aes_tx_sc->pn = cpu_to_le64( + (u64)pn[5] | + ((u64)pn[4] << 8) | + ((u64)pn[3] << 16) | + ((u64)pn[2] << 24) | + ((u64)pn[1] << 32) | + ((u64)pn[0] << 40)); + } else + aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc; + + /* + * For non-QoS this relies on the fact that both the uCode and + * mac80211 use TID 0 for checking the IV in the frames. 
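/*
 * Illustrative sketch, not part of the patch: the packing used above (and
 * in the receive loop below) to fold the 6-byte CCMP packet number from
 * mac80211, pn[0] being the most significant byte, into the little-endian
 * 48-bit counter the uCode expects.  Helper name is hypothetical.
 */
static __le64 ccmp_pn_to_le64(const u8 pn[6])
{
        return cpu_to_le64((u64)pn[5] |
                           ((u64)pn[4] << 8) |
                           ((u64)pn[3] << 16) |
                           ((u64)pn[2] << 24) |
                           ((u64)pn[1] << 32) |
                           ((u64)pn[0] << 40));
}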
+ */ + for (i = 0; i < IWLAGN_NUM_RSC; i++) { + u8 *pn = seq.ccmp.pn; + + ieee80211_get_key_rx_seq(key, i, &seq); + aes_sc->pn = cpu_to_le64( + (u64)pn[5] | + ((u64)pn[4] << 8) | + ((u64)pn[3] << 16) | + ((u64)pn[2] << 24) | + ((u64)pn[1] << 32) | + ((u64)pn[0] << 40)); + } + data->use_rsc_tsc = true; + break; + } + + mutex_unlock(&priv->mutex); +} + +static int iwlagn_mac_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wowlan) +{ + struct iwl_priv *priv = hw->priv; + struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd; + struct iwl_rxon_cmd rxon; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd; + struct iwlagn_wowlan_tkip_params_cmd tkip_cmd = {}; + struct wowlan_key_data key_data = { + .ctx = ctx, + .bssid = ctx->active.bssid_addr, + .use_rsc_tsc = false, + .tkip = &tkip_cmd, + .use_tkip = false, + }; + int ret, i; + u16 seq; + + if (WARN_ON(!wowlan)) + return -EINVAL; + + mutex_lock(&priv->mutex); + + /* Don't attempt WoWLAN when not associated, tear down instead. */ + if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION || + !iwl_is_associated_ctx(ctx)) { + ret = 1; + goto out; + } + + key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL); + if (!key_data.rsc_tsc) { + ret = -ENOMEM; + goto out; + } + + memset(&wakeup_filter_cmd, 0, sizeof(wakeup_filter_cmd)); + + /* + * We know the last used seqno, and the uCode expects to know that + * one, it will increment before TX. + */ + seq = le16_to_cpu(priv->last_seq_ctl) & IEEE80211_SCTL_SEQ; + wakeup_filter_cmd.non_qos_seq = cpu_to_le16(seq); + + /* + * For QoS counters, we store the one to use next, so subtract 0x10 + * since the uCode will add 0x10 before using the value. + */ + for (i = 0; i < 8; i++) { + seq = priv->stations[IWL_AP_ID].tid[i].seq_number; + seq -= 0x10; + wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq); + } + + if (wowlan->disconnect) + wakeup_filter_cmd.enabled |= + cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_BEACON_MISS | + IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE); + if (wowlan->magic_pkt) + wakeup_filter_cmd.enabled |= + cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET); + if (wowlan->gtk_rekey_failure) + wakeup_filter_cmd.enabled |= + cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL); + if (wowlan->eap_identity_req) + wakeup_filter_cmd.enabled |= + cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ); + if (wowlan->four_way_handshake) + wakeup_filter_cmd.enabled |= + cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE); + if (wowlan->rfkill_release) + wakeup_filter_cmd.enabled |= + cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_RFKILL); + if (wowlan->n_patterns) + wakeup_filter_cmd.enabled |= + cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH); + + iwl_scan_cancel_timeout(priv, 200); + + memcpy(&rxon, &ctx->active, sizeof(rxon)); + + trans_stop_device(&priv->trans); + + priv->wowlan = true; + + ret = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_wowlan, + IWL_UCODE_WOWLAN); + if (ret) + goto error; + + /* now configure WoWLAN ucode */ + ret = iwl_alive_start(priv); + if (ret) + goto error; + + memcpy(&ctx->staging, &rxon, sizeof(rxon)); + ret = iwlagn_commit_rxon(priv, ctx); + if (ret) + goto error; + + ret = iwl_power_update_mode(priv, true); + if (ret) + goto error; + + if (!iwlagn_mod_params.sw_crypto) { + /* mark all keys clear */ + priv->ucode_key_table = 0; + ctx->key_mapping_keys = 0; + + /* + * This needs to be unlocked due to lock ordering + * constraints. Since we're in the suspend path + * that isn't really a problem though. 
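/*
 * Illustrative sketch, not part of the patch: the arithmetic behind the
 * wakeup-filter sequence numbers set up earlier in this function.  The
 * 802.11 sequence-control field keeps the fragment number in its low four
 * bits, so one whole sequence number is a step of 0x10.  Helper names are
 * hypothetical.
 */

/* non-QoS: hand over the last used value; the uCode increments before TX */
static u16 wowlan_non_qos_seq(__le16 last_seq_ctl)
{
        return le16_to_cpu(last_seq_ctl) & IEEE80211_SCTL_SEQ;
}

/* QoS: the driver stores the *next* value per TID, and the uCode will add
 * 0x10 before using it, so step back by one sequence number here */
static u16 wowlan_qos_seq(u16 next_seq)
{
        return next_seq - 0x10;
}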
+ */ + mutex_unlock(&priv->mutex); + ieee80211_iter_keys(priv->hw, ctx->vif, + iwlagn_wowlan_program_keys, + &key_data); + mutex_lock(&priv->mutex); + if (key_data.error) { + ret = -EIO; + goto error; + } + + if (key_data.use_rsc_tsc) { + struct iwl_host_cmd rsc_tsc_cmd = { + .id = REPLY_WOWLAN_TSC_RSC_PARAMS, + .flags = CMD_SYNC, + .data[0] = key_data.rsc_tsc, + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + .len[0] = sizeof(*key_data.rsc_tsc), + }; + + ret = trans_send_cmd(&priv->trans, &rsc_tsc_cmd); + if (ret) + goto error; + } + + if (key_data.use_tkip) { + ret = trans_send_cmd_pdu(&priv->trans, + REPLY_WOWLAN_TKIP_PARAMS, + CMD_SYNC, sizeof(tkip_cmd), + &tkip_cmd); + if (ret) + goto error; + } + + if (priv->have_rekey_data) { + memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd)); + memcpy(kek_kck_cmd.kck, priv->kck, NL80211_KCK_LEN); + kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN); + memcpy(kek_kck_cmd.kek, priv->kek, NL80211_KEK_LEN); + kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN); + kek_kck_cmd.replay_ctr = priv->replay_ctr; + + ret = trans_send_cmd_pdu(&priv->trans, + REPLY_WOWLAN_KEK_KCK_MATERIAL, + CMD_SYNC, sizeof(kek_kck_cmd), + &kek_kck_cmd); + if (ret) + goto error; + } + } + + ret = trans_send_cmd_pdu(&priv->trans, REPLY_WOWLAN_WAKEUP_FILTER, + CMD_SYNC, sizeof(wakeup_filter_cmd), + &wakeup_filter_cmd); + if (ret) + goto error; + + ret = iwlagn_send_patterns(priv, wowlan); + if (ret) + goto error; + + device_set_wakeup_enable(priv->bus->dev, true); + + /* Now let the ucode operate on its own */ + iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); + + goto out; + + error: + priv->wowlan = false; + iwlagn_prepare_restart(priv); + ieee80211_restart_hw(priv->hw); + out: + mutex_unlock(&priv->mutex); + kfree(key_data.rsc_tsc); + return ret; +} + +static int iwlagn_mac_resume(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct ieee80211_vif *vif; + unsigned long flags; + u32 base, status = 0xffffffff; + int ret = -EIO; + + mutex_lock(&priv->mutex); + + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); + + base = priv->device_pointers.error_event_table; + if (iwlagn_hw_valid_rtc_data_addr(base)) { + spin_lock_irqsave(&priv->reg_lock, flags); + ret = iwl_grab_nic_access_silent(priv); + if (ret == 0) { + iwl_write32(priv, HBUS_TARG_MEM_RADDR, base); + status = iwl_read32(priv, HBUS_TARG_MEM_RDAT); + iwl_release_nic_access(priv); + } + spin_unlock_irqrestore(&priv->reg_lock, flags); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (ret == 0) { + if (!priv->wowlan_sram) + priv->wowlan_sram = + kzalloc(priv->ucode_wowlan.data.len, + GFP_KERNEL); + + if (priv->wowlan_sram) + _iwl_read_targ_mem_words( + priv, 0x800000, priv->wowlan_sram, + priv->ucode_wowlan.data.len / 4); + } +#endif + } + + /* we'll clear ctx->vif during iwlagn_prepare_restart() */ + vif = ctx->vif; + + priv->wowlan = false; + + device_set_wakeup_enable(priv->bus->dev, false); + + iwlagn_prepare_restart(priv); + + memset((void *)&ctx->active, 0, sizeof(ctx->active)); + iwl_connection_init_rx_config(priv, ctx); + iwlagn_set_rxon_chain(priv, ctx); + + mutex_unlock(&priv->mutex); + + ieee80211_resume_disconnect(vif); + + return 1; +} +#endif + static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct iwl_priv *priv = hw->priv; @@ -2573,14 +2766,8 @@ static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw, u32 iv32, u16 *phase1key) { struct iwl_priv *priv = 
hw->priv; - struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - iwl_update_tkip_key(priv, vif_priv->ctx, keyconf, sta, - iv32, phase1key); - IWL_DEBUG_MAC80211(priv, "leave\n"); + iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key); } static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, @@ -2592,7 +2779,6 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; struct iwl_rxon_context *ctx = vif_priv->ctx; int ret; - u8 sta_id; bool is_default_wep_key = false; IWL_DEBUG_MAC80211(priv, "enter\n"); @@ -2603,20 +2789,27 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, } /* - * To support IBSS RSN, don't program group keys in IBSS, the - * hardware will then not attempt to decrypt the frames. + * We could program these keys into the hardware as well, but we + * don't expect much multicast traffic in IBSS and having keys + * for more stations is probably more useful. + * + * Mark key TX-only and return 0. */ if (vif->type == NL80211_IFTYPE_ADHOC && - !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) - return -EOPNOTSUPP; + !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { + key->hw_key_idx = WEP_INVALID_OFFSET; + return 0; + } - sta_id = iwl_sta_id_or_broadcast(priv, vif_priv->ctx, sta); - if (sta_id == IWL_INVALID_STATION) - return -EINVAL; + /* If they key was TX-only, accept deletion */ + if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET) + return 0; mutex_lock(&priv->mutex); iwl_scan_cancel_timeout(priv, 100); + BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT); + /* * If we are getting WEP group key and we didn't receive any key mapping * so far, we are in legacy wep mode (group key only), otherwise we are @@ -2624,22 +2817,30 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, * In legacy wep mode, we use another host command to the uCode. 
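/*
 * Illustrative sketch, not part of the patch: the legacy-WEP decision the
 * comment just above describes.  A WEP group key installed while no
 * key-mapping (pairwise) key exists yet means the old "default WEP key"
 * host command is used; once any key-mapping key is present, WEP goes
 * through the same dynamic-key path as the other ciphers.  Helper name is
 * hypothetical.
 */
static bool wep_key_is_default(struct iwl_rxon_context *ctx,
                               struct ieee80211_key_conf *key,
                               struct ieee80211_sta *sta,
                               enum set_key_cmd cmd)
{
        bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
                      key->cipher == WLAN_CIPHER_SUITE_WEP104;

        if (!is_wep || sta)
                return false;   /* pairwise or non-WEP: dynamic key path */

        if (cmd == SET_KEY)
                return !ctx->key_mapping_keys;

        /* on removal, trust how the key was installed in the first place */
        return key->hw_key_idx == IWLAGN_HW_KEY_DEFAULT;
}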
*/ if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 || - key->cipher == WLAN_CIPHER_SUITE_WEP104) && - !sta) { + key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) { if (cmd == SET_KEY) is_default_wep_key = !ctx->key_mapping_keys; else is_default_wep_key = - (key->hw_key_idx == HW_KEY_DEFAULT); + key->hw_key_idx == IWLAGN_HW_KEY_DEFAULT; } + switch (cmd) { case SET_KEY: - if (is_default_wep_key) + if (is_default_wep_key) { ret = iwl_set_default_wep_key(priv, vif_priv->ctx, key); - else - ret = iwl_set_dynamic_key(priv, vif_priv->ctx, - key, sta_id); + break; + } + ret = iwl_set_dynamic_key(priv, vif_priv->ctx, key, sta); + if (ret) { + /* + * can't add key for RX, but we don't need it + * in the device for TX so still return 0 + */ + ret = 0; + key->hw_key_idx = WEP_INVALID_OFFSET; + } IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n"); break; @@ -2647,7 +2848,7 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, if (is_default_wep_key) ret = iwl_remove_default_wep_key(priv, ctx, key); else - ret = iwl_remove_dynamic_key(priv, ctx, key, sta_id); + ret = iwl_remove_dynamic_key(priv, ctx, key, sta); IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n"); break; @@ -2674,7 +2875,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", sta->addr, tid); - if (!(priv->cfg->sku & IWL_SKU_N)) + if (!(priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)) return -EACCES; mutex_lock(&priv->mutex); @@ -2694,29 +2895,26 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, IWL_DEBUG_HT(priv, "start Tx\n"); ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn); if (ret == 0) { - priv->_agn.agg_tids_count++; - IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n", - priv->_agn.agg_tids_count); + priv->agg_tids_count++; + IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n", + priv->agg_tids_count); } break; case IEEE80211_AMPDU_TX_STOP: IWL_DEBUG_HT(priv, "stop Tx\n"); ret = iwlagn_tx_agg_stop(priv, vif, sta, tid); - if ((ret == 0) && (priv->_agn.agg_tids_count > 0)) { - priv->_agn.agg_tids_count--; - IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n", - priv->_agn.agg_tids_count); + if ((ret == 0) && (priv->agg_tids_count > 0)) { + priv->agg_tids_count--; + IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n", + priv->agg_tids_count); } if (test_bit(STATUS_EXIT_PENDING, &priv->status)) ret = 0; if (priv->cfg->ht_params && priv->cfg->ht_params->use_rts_for_aggregation) { - struct iwl_station_priv *sta_priv = - (void *) sta->drv_priv; /* * switch off RTS/CTS if it was previously enabled */ - sta_priv->lq_sta.lq.general_params.flags &= ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK; iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif), @@ -2726,7 +2924,8 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, case IEEE80211_AMPDU_TX_OPERATIONAL: buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF); - iwlagn_txq_agg_queue_setup(priv, sta, tid, buf_size); + trans_txq_agg_setup(&priv->trans, iwl_sta_id(sta), tid, + buf_size); /* * If the limit is 0, then it wasn't initialised yet, @@ -2764,6 +2963,9 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif), &sta_priv->lq_sta.lq, CMD_ASYNC, false); + + IWL_INFO(priv, "Tx aggregation enabled on ra = %pM tid = %d\n", + sta->addr, tid); ret = 0; break; } @@ -2833,7 +3035,6 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw, */ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; u16 ch; - unsigned long 
flags = 0; IWL_DEBUG_MAC80211(priv, "enter\n"); @@ -2850,65 +3051,64 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw, if (!iwl_is_associated_ctx(ctx)) goto out; - if (priv->cfg->ops->lib->set_channel_switch) { + if (!priv->cfg->lib->set_channel_switch) + goto out; - ch = channel->hw_value; - if (le16_to_cpu(ctx->active.channel) != ch) { - ch_info = iwl_get_channel_info(priv, - channel->band, - ch); - if (!is_channel_valid(ch_info)) { - IWL_DEBUG_MAC80211(priv, "invalid channel\n"); - goto out; - } - spin_lock_irqsave(&priv->lock, flags); - - priv->current_ht_config.smps = conf->smps_mode; - - /* Configure HT40 channels */ - ctx->ht.enabled = conf_is_ht(conf); - if (ctx->ht.enabled) { - if (conf_is_ht40_minus(conf)) { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_BELOW; - ctx->ht.is_40mhz = true; - } else if (conf_is_ht40_plus(conf)) { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_ABOVE; - ctx->ht.is_40mhz = true; - } else { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_NONE; - ctx->ht.is_40mhz = false; - } - } else - ctx->ht.is_40mhz = false; - - if ((le16_to_cpu(ctx->staging.channel) != ch)) - ctx->staging.flags = 0; - - iwl_set_rxon_channel(priv, channel, ctx); - iwl_set_rxon_ht(priv, ht_conf); - iwl_set_flags_for_band(priv, ctx, channel->band, - ctx->vif); - spin_unlock_irqrestore(&priv->lock, flags); - - iwl_set_rate(priv); - /* - * at this point, staging_rxon has the - * configuration for channel switch - */ - set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status); - priv->switch_channel = cpu_to_le16(ch); - if (priv->cfg->ops->lib->set_channel_switch(priv, - ch_switch)) { - clear_bit(STATUS_CHANNEL_SWITCH_PENDING, - &priv->status); - priv->switch_channel = 0; - ieee80211_chswitch_done(ctx->vif, false); - } + ch = channel->hw_value; + if (le16_to_cpu(ctx->active.channel) == ch) + goto out; + + ch_info = iwl_get_channel_info(priv, channel->band, ch); + if (!is_channel_valid(ch_info)) { + IWL_DEBUG_MAC80211(priv, "invalid channel\n"); + goto out; + } + + spin_lock_irq(&priv->lock); + + priv->current_ht_config.smps = conf->smps_mode; + + /* Configure HT40 channels */ + ctx->ht.enabled = conf_is_ht(conf); + if (ctx->ht.enabled) { + if (conf_is_ht40_minus(conf)) { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_BELOW; + ctx->ht.is_40mhz = true; + } else if (conf_is_ht40_plus(conf)) { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_ABOVE; + ctx->ht.is_40mhz = true; + } else { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_NONE; + ctx->ht.is_40mhz = false; } + } else + ctx->ht.is_40mhz = false; + + if ((le16_to_cpu(ctx->staging.channel) != ch)) + ctx->staging.flags = 0; + + iwl_set_rxon_channel(priv, channel, ctx); + iwl_set_rxon_ht(priv, ht_conf); + iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif); + + spin_unlock_irq(&priv->lock); + + iwl_set_rate(priv); + /* + * at this point, staging_rxon has the + * configuration for channel switch + */ + set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status); + priv->switch_channel = cpu_to_le16(ch); + if (priv->cfg->lib->set_channel_switch(priv, ch_switch)) { + clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status); + priv->switch_channel = 0; + ieee80211_chswitch_done(ctx->vif, false); } + out: mutex_unlock(&priv->mutex); IWL_DEBUG_MAC80211(priv, "leave\n"); @@ -2971,10 +3171,6 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop) mutex_lock(&priv->mutex); IWL_DEBUG_MAC80211(priv, "enter\n"); - /* do not support 
"flush" */ - if (!priv->cfg->ops->lib->txfifo_flush) - goto done; - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n"); goto done; @@ -2990,7 +3186,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop) */ if (drop) { IWL_DEBUG_MAC80211(priv, "send flush command\n"); - if (priv->cfg->ops->lib->txfifo_flush(priv, IWL_DROP_ALL)) { + if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) { IWL_ERR(priv, "flush request fail\n"); goto done; } @@ -3017,9 +3213,9 @@ static void iwlagn_disable_roc(struct iwl_priv *priv) iwl_set_rxon_channel(priv, chan, ctx); iwl_set_flags_for_band(priv, ctx, chan->band, NULL); - priv->_agn.hw_roc_channel = NULL; + priv->hw_roc_channel = NULL; - iwlcore_commit_rxon(priv, ctx); + iwlagn_commit_rxon(priv, ctx); ctx->is_active = false; } @@ -3027,7 +3223,7 @@ static void iwlagn_disable_roc(struct iwl_priv *priv) static void iwlagn_bg_roc_done(struct work_struct *work) { struct iwl_priv *priv = container_of(work, struct iwl_priv, - _agn.hw_roc_work.work); + hw_roc_work.work); mutex_lock(&priv->mutex); ieee80211_remain_on_channel_expired(priv->hw); @@ -3059,11 +3255,11 @@ static int iwl_mac_remain_on_channel(struct ieee80211_hw *hw, } priv->contexts[IWL_RXON_CTX_PAN].is_active = true; - priv->_agn.hw_roc_channel = channel; - priv->_agn.hw_roc_chantype = channel_type; - priv->_agn.hw_roc_duration = DIV_ROUND_UP(duration * 1000, 1024); - iwlcore_commit_rxon(priv, &priv->contexts[IWL_RXON_CTX_PAN]); - queue_delayed_work(priv->workqueue, &priv->_agn.hw_roc_work, + priv->hw_roc_channel = channel; + priv->hw_roc_chantype = channel_type; + priv->hw_roc_duration = DIV_ROUND_UP(duration * 1000, 1024); + iwlagn_commit_rxon(priv, &priv->contexts[IWL_RXON_CTX_PAN]); + queue_delayed_work(priv->workqueue, &priv->hw_roc_work, msecs_to_jiffies(duration + 20)); msleep(IWL_MIN_SLOT_TIME); /* TU is almost ms */ @@ -3082,7 +3278,7 @@ static int iwl_mac_cancel_remain_on_channel(struct ieee80211_hw *hw) if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN))) return -EOPNOTSUPP; - cancel_delayed_work_sync(&priv->_agn.hw_roc_work); + cancel_delayed_work_sync(&priv->hw_roc_work); mutex_lock(&priv->mutex); iwlagn_disable_roc(priv); @@ -3104,18 +3300,17 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv) init_waitqueue_head(&priv->wait_command_queue); INIT_WORK(&priv->restart, iwl_bg_restart); - INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish); INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update); INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work); INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush); INIT_WORK(&priv->bt_full_concurrency, iwl_bg_bt_full_concurrency); INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config); - INIT_DELAYED_WORK(&priv->_agn.hw_roc_work, iwlagn_bg_roc_done); + INIT_DELAYED_WORK(&priv->hw_roc_work, iwlagn_bg_roc_done); iwl_setup_scan_deferred_work(priv); - if (priv->cfg->ops->lib->setup_deferred_work) - priv->cfg->ops->lib->setup_deferred_work(priv); + if (priv->cfg->lib->bt_setup_deferred_work) + priv->cfg->lib->bt_setup_deferred_work(priv); init_timer(&priv->statistics_periodic); priv->statistics_periodic.data = (unsigned long)priv; @@ -3128,15 +3323,12 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv) init_timer(&priv->watchdog); priv->watchdog.data = (unsigned long)priv; priv->watchdog.function = iwl_bg_watchdog; - - tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) - iwl_irq_tasklet, (unsigned long)priv); } static void iwl_cancel_deferred_work(struct 
iwl_priv *priv) { - if (priv->cfg->ops->lib->cancel_deferred_work) - priv->cfg->ops->lib->cancel_deferred_work(priv); + if (priv->cfg->lib->cancel_deferred_work) + priv->cfg->lib->cancel_deferred_work(priv); cancel_work_sync(&priv->run_time_calib_work); cancel_work_sync(&priv->beacon_update); @@ -3187,7 +3379,7 @@ static int iwl_init_drv(struct iwl_priv *priv) priv->iw_mode = NL80211_IFTYPE_STATION; priv->current_ht_config.smps = IEEE80211_SMPS_STATIC; priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; - priv->_agn.agg_tids_count = 0; + priv->agg_tids_count = 0; /* initialize force reset */ priv->force_reset[IWL_RF_RESET].reset_duration = @@ -3198,9 +3390,7 @@ static int iwl_init_drv(struct iwl_priv *priv) priv->rx_statistics_jiffies = jiffies; /* Choose which receivers/antennas to use */ - if (priv->cfg->ops->hcmd->set_rxon_chain) - priv->cfg->ops->hcmd->set_rxon_chain(priv, - &priv->contexts[IWL_RXON_CTX_BSS]); + iwlagn_set_rxon_chain(priv, &priv->contexts[IWL_RXON_CTX_BSS]); iwl_init_scan_params(priv); @@ -3243,12 +3433,42 @@ static void iwl_uninit_drv(struct iwl_priv *priv) iwl_free_channel_map(priv); kfree(priv->scan_cmd); kfree(priv->beacon_cmd); +#ifdef CONFIG_IWLWIFI_DEBUGFS + kfree(priv->wowlan_sram); +#endif +} + +static void iwl_mac_rssi_callback(struct ieee80211_hw *hw, + enum ieee80211_rssi_event rssi_event) +{ + struct iwl_priv *priv = hw->priv; + + mutex_lock(&priv->mutex); + + if (priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist) { + if (rssi_event == RSSI_EVENT_LOW) + priv->bt_enable_pspoll = true; + else if (rssi_event == RSSI_EVENT_HIGH) + priv->bt_enable_pspoll = false; + + iwlagn_send_advance_bt_config(priv); + } else { + IWL_DEBUG_MAC80211(priv, "Advanced BT coex disabled," + "ignoring RSSI callback\n"); + } + + mutex_unlock(&priv->mutex); } struct ieee80211_ops iwlagn_hw_ops = { .tx = iwlagn_mac_tx, .start = iwlagn_mac_start, .stop = iwlagn_mac_stop, +#ifdef CONFIG_PM + .suspend = iwlagn_mac_suspend, + .resume = iwlagn_mac_resume, +#endif .add_interface = iwl_mac_add_interface, .remove_interface = iwl_mac_remove_interface, .change_interface = iwl_mac_change_interface, @@ -3256,6 +3476,7 @@ struct ieee80211_ops iwlagn_hw_ops = { .configure_filter = iwlagn_configure_filter, .set_key = iwlagn_mac_set_key, .update_tkip_key = iwlagn_mac_update_tkip_key, + .set_rekey_data = iwlagn_mac_set_rekey_data, .conf_tx = iwl_mac_conf_tx, .bss_info_changed = iwlagn_bss_info_changed, .ampdu_action = iwlagn_mac_ampdu_action, @@ -3270,15 +3491,13 @@ struct ieee80211_ops iwlagn_hw_ops = { .cancel_remain_on_channel = iwl_mac_cancel_remain_on_channel, .offchannel_tx = iwl_mac_offchannel_tx, .offchannel_tx_cancel_wait = iwl_mac_offchannel_tx_cancel_wait, + .rssi_callback = iwl_mac_rssi_callback, CFG80211_TESTMODE_CMD(iwl_testmode_cmd) + CFG80211_TESTMODE_DUMP(iwl_testmode_dump) }; static u32 iwl_hw_detect(struct iwl_priv *priv) { - u8 rev_id; - - pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id); - IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id); return iwl_read32(priv, CSR_HW_REV); } @@ -3294,10 +3513,10 @@ static int iwl_set_hw_params(struct iwl_priv *priv) priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL; if (iwlagn_mod_params.disable_11n) - priv->cfg->sku &= ~IWL_SKU_N; + priv->cfg->sku &= ~EEPROM_SKU_CAP_11N_ENABLE; /* Device-specific setup */ - return priv->cfg->ops->lib->set_hw_params(priv); + return priv->cfg->lib->set_hw_params(priv); } static const u8 iwlagn_bss_ac_to_fifo[] = { @@ -3344,29 +3563,9 @@ out: return 
hw; } -static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +static void iwl_init_context(struct iwl_priv *priv) { - int err = 0, i; - struct iwl_priv *priv; - struct ieee80211_hw *hw; - struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); - unsigned long flags; - u16 pci_cmd, num_mac; - u32 hw_rev; - - /************************ - * 1. Allocating HW data - ************************/ - - hw = iwl_alloc_all(cfg); - if (!hw) { - err = -ENOMEM; - goto out; - } - priv = hw->priv; - /* At this point both hw and priv are allocated. */ - - priv->ucode_type = UCODE_SUBTYPE_NONE_LOADED; + int i; /* * The default context is always valid, @@ -3398,8 +3597,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS; priv->contexts[IWL_RXON_CTX_PAN].rxon_cmd = REPLY_WIPAN_RXON; - priv->contexts[IWL_RXON_CTX_PAN].rxon_timing_cmd = REPLY_WIPAN_RXON_TIMING; - priv->contexts[IWL_RXON_CTX_PAN].rxon_assoc_cmd = REPLY_WIPAN_RXON_ASSOC; + priv->contexts[IWL_RXON_CTX_PAN].rxon_timing_cmd = + REPLY_WIPAN_RXON_TIMING; + priv->contexts[IWL_RXON_CTX_PAN].rxon_assoc_cmd = + REPLY_WIPAN_RXON_ASSOC; priv->contexts[IWL_RXON_CTX_PAN].qos_cmd = REPLY_WIPAN_QOS_PARAM; priv->contexts[IWL_RXON_CTX_PAN].ap_sta_id = IWL_AP_ID_PAN; priv->contexts[IWL_RXON_CTX_PAN].wep_key_cmd = REPLY_WIPAN_WEPKEY; @@ -3419,12 +3620,35 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P; BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); +} - SET_IEEE80211_DEV(hw, &pdev->dev); +int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg) +{ + int err = 0; + struct iwl_priv *priv; + struct ieee80211_hw *hw; + u16 num_mac; + u32 hw_rev; + + /************************ + * 1. Allocating HW data + ************************/ + hw = iwl_alloc_all(cfg); + if (!hw) { + err = -ENOMEM; + goto out; + } + + priv = hw->priv; + priv->bus = bus; + bus_set_drv_data(priv->bus, priv); + + /* At this point both hw and priv are allocated. */ + + SET_IEEE80211_DEV(hw, priv->bus->dev); IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); priv->cfg = cfg; - priv->pci_dev = pdev; priv->inta_mask = CSR_INI_SET_MASK; /* is antenna coupling more than 35dB ? */ @@ -3440,53 +3664,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (iwl_alloc_traffic_mem(priv)) IWL_ERR(priv, "Not enough memory to generate traffic log\n"); - /************************** - * 2. Initializing PCI bus - **************************/ - pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | - PCIE_LINK_STATE_CLKPM); - - if (pci_enable_device(pdev)) { - err = -ENODEV; - goto out_ieee80211_free_hw; - } - - pci_set_master(pdev); - - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); - if (err) { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - /* both attempts failed: */ - if (err) { - IWL_WARN(priv, "No suitable DMA available.\n"); - goto out_pci_disable_device; - } - } - - err = pci_request_regions(pdev, DRV_NAME); - if (err) - goto out_pci_disable_device; - - pci_set_drvdata(pdev, priv); - - - /*********************** - * 3. 
Read REV register - ***********************/ - priv->hw_base = pci_iomap(pdev, 0, 0); - if (!priv->hw_base) { - err = -ENODEV; - goto out_pci_release_regions; - } - - IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n", - (unsigned long long) pci_resource_len(pdev, 0)); - IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base); - /* these spin locks will be used in apm_ops.init and EEPROM access * we should init now */ @@ -3500,17 +3677,21 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) */ iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + /*********************** + * 3. Read REV register + ***********************/ hw_rev = iwl_hw_detect(priv); IWL_INFO(priv, "Detected %s, REV=0x%X\n", priv->cfg->name, hw_rev); - /* We disable the RETRY_TIMEOUT register (0x41) to keep - * PCI Tx retries from interfering with C3 CPU state */ - pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); + err = iwl_trans_register(&priv->trans, priv); + if (err) + goto out_free_traffic_mem; - if (iwl_prepare_card_hw(priv)) { + if (trans_prepare_card_hw(&priv->trans)) { + err = -EIO; IWL_WARN(priv, "Failed, HW not ready\n"); - goto out_iounmap; + goto out_free_trans; } /***************** @@ -3520,7 +3701,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = iwl_eeprom_init(priv, hw_rev); if (err) { IWL_ERR(priv, "Unable to init EEPROM\n"); - goto out_iounmap; + goto out_free_trans; } err = iwl_eeprom_check_version(priv); if (err) @@ -3543,10 +3724,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) priv->hw->wiphy->n_addresses++; } + /* initialize all valid contexts */ + iwl_init_context(priv); + /************************ * 5. Setup HW constants ************************/ if (iwl_set_hw_params(priv)) { + err = -ENOENT; IWL_ERR(priv, "failed to set hw parameters\n"); goto out_free_eeprom; } @@ -3563,36 +3748,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /******************** * 7. Setup services ********************/ - spin_lock_irqsave(&priv->lock, flags); - iwl_disable_interrupts(priv); - spin_unlock_irqrestore(&priv->lock, flags); - - pci_enable_msi(priv->pci_dev); - - iwl_alloc_isr_ict(priv); - - err = request_irq(priv->pci_dev->irq, iwl_isr_ict, - IRQF_SHARED, DRV_NAME, priv); - if (err) { - IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq); - goto out_disable_msi; - } - iwl_setup_deferred_work(priv); iwl_setup_rx_handlers(priv); iwl_testmode_init(priv); /********************************************* - * 8. Enable interrupts and read RFKILL state + * 8. 
Enable interrupts *********************************************/ - /* enable rfkill interrupt: hw bug w/a */ - pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd); - if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { - pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; - pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd); - } - iwl_enable_rfkill_int(priv); /* If platform's RF_KILL switch is NOT set to KILL */ @@ -3607,7 +3770,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) iwl_power_initialize(priv); iwl_tt_initialize(priv); - init_completion(&priv->_agn.firmware_loading_complete); + init_completion(&priv->firmware_loading_complete); err = iwl_request_firmware(priv, true); if (err) @@ -3615,44 +3778,32 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; - out_destroy_workqueue: +out_destroy_workqueue: destroy_workqueue(priv->workqueue); priv->workqueue = NULL; - free_irq(priv->pci_dev->irq, priv); - iwl_free_isr_ict(priv); - out_disable_msi: - pci_disable_msi(priv->pci_dev); iwl_uninit_drv(priv); - out_free_eeprom: +out_free_eeprom: iwl_eeprom_free(priv); - out_iounmap: - pci_iounmap(pdev, priv->hw_base); - out_pci_release_regions: - pci_set_drvdata(pdev, NULL); - pci_release_regions(pdev); - out_pci_disable_device: - pci_disable_device(pdev); - out_ieee80211_free_hw: +out_free_trans: + trans_free(&priv->trans); +out_free_traffic_mem: iwl_free_traffic_mem(priv); ieee80211_free_hw(priv->hw); - out: +out: return err; } -static void __devexit iwl_pci_remove(struct pci_dev *pdev) +void __devexit iwl_remove(struct iwl_priv * priv) { - struct iwl_priv *priv = pci_get_drvdata(pdev); unsigned long flags; - if (!priv) - return; - - wait_for_completion(&priv->_agn.firmware_loading_complete); + wait_for_completion(&priv->firmware_loading_complete); IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n"); iwl_dbgfs_unregister(priv); - sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group); + sysfs_remove_group(&priv->bus->dev->kobj, + &iwl_attribute_group); /* ieee80211_unregister_hw call wil cause iwl_mac_stop to * to be called and iwl_down since we are removing the device @@ -3680,17 +3831,15 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev) iwl_disable_interrupts(priv); spin_unlock_irqrestore(&priv->lock, flags); - iwl_synchronize_irq(priv); + trans_sync_irq(&priv->trans); - iwl_dealloc_ucode_pci(priv); + iwl_dealloc_ucode(priv); - if (priv->rxq.bd) - iwlagn_rx_queue_free(priv, &priv->rxq); - iwlagn_hw_txq_ctx_free(priv); + trans_rx_free(&priv->trans); + trans_tx_free(&priv->trans); iwl_eeprom_free(priv); - /*netif_stop_queue(dev); */ flush_workqueue(priv->workqueue); @@ -3701,16 +3850,11 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev) priv->workqueue = NULL; iwl_free_traffic_mem(priv); - free_irq(priv->pci_dev->irq, priv); - pci_disable_msi(priv->pci_dev); - pci_iounmap(pdev, priv->hw_base); - pci_release_regions(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); + trans_free(&priv->trans); - iwl_uninit_drv(priv); + bus_set_drv_data(priv->bus, NULL); - iwl_free_isr_ict(priv); + iwl_uninit_drv(priv); dev_kfree_skb(priv->beacon_skb); @@ -3723,206 +3867,6 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev) * driver and module entry point * *****************************************************************************/ - -/* Hardware specific file defines the PCI IDs table for that hardware module */ -static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { - {IWL_PCI_DEVICE(0x4232, 0x1201, 
iwl5100_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1304, iwl5100_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bgn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bgn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1221, iwl5100_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1321, iwl5100_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1224, iwl5100_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1324, iwl5100_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1225, iwl5100_bgn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1325, iwl5100_bgn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1226, iwl5100_abg_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4237, 0x1211, iwl5100_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4237, 0x1311, iwl5100_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4237, 0x1214, iwl5100_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4237, 0x1314, iwl5100_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4237, 0x1215, iwl5100_bgn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4237, 0x1315, iwl5100_bgn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4237, 0x1316, iwl5100_abg_cfg)}, /* Half Mini Card */ - -/* 5300 Series WiFi */ - {IWL_PCI_DEVICE(0x4235, 0x1021, iwl5300_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4235, 0x1121, iwl5300_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4235, 0x1024, iwl5300_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4235, 0x1124, iwl5300_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4235, 0x1001, iwl5300_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4235, 0x1101, iwl5300_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4235, 0x1004, iwl5300_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4235, 0x1104, iwl5300_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4236, 0x1011, iwl5300_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4236, 0x1111, iwl5300_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x4236, 0x1014, iwl5300_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x4236, 0x1114, iwl5300_agn_cfg)}, /* Half Mini Card */ - -/* 5350 Series WiFi/WiMax */ - {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, /* Mini Card */ - -/* 5150 Series Wifi/WiMax */ - {IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_abg_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */ - - {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */ - {IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_abg_cfg)}, /* Mini Card */ - {IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_abg_cfg)}, /* Half Mini Card */ 
- -/* 6x00 Series */ - {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)}, - {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)}, - {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)}, - {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)}, - {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)}, - {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)}, - {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)}, - {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)}, - {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)}, - {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)}, - -/* 6x05 Series */ - {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)}, - {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)}, - {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)}, - {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)}, - -/* 6x30 Series */ - {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)}, - {IWL_PCI_DEVICE(0x008A, 0x5307, iwl1030_bg_cfg)}, - {IWL_PCI_DEVICE(0x008A, 0x5325, iwl1030_bgn_cfg)}, - {IWL_PCI_DEVICE(0x008A, 0x5327, iwl1030_bg_cfg)}, - {IWL_PCI_DEVICE(0x008B, 0x5315, iwl1030_bgn_cfg)}, - {IWL_PCI_DEVICE(0x008B, 0x5317, iwl1030_bg_cfg)}, - {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_2abg_cfg)}, - {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6030_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6030_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6030_2abg_cfg)}, - {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6030_2bg_cfg)}, - {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6030_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6030_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6030_2abg_cfg)}, - -/* 6x50 WiFi/WiMax Series */ - {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)}, - {IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_2abg_cfg)}, - {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)}, - {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)}, - -/* 6150 WiFi/WiMax Series */ - {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_bg_cfg)}, - {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_bg_cfg)}, - {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_bg_cfg)}, - -/* 1000 Series WiFi */ - {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_bg_cfg)}, - {IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_bg_cfg)}, - {IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_bg_cfg)}, - {IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_bg_cfg)}, - {IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_bg_cfg)}, - {IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_bg_cfg)}, - -/* 100 Series WiFi */ - {IWL_PCI_DEVICE(0x08AE, 0x1005, iwl100_bgn_cfg)}, - {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)}, - {IWL_PCI_DEVICE(0x08AF, 0x1015, iwl100_bgn_cfg)}, - {IWL_PCI_DEVICE(0x08AF, 0x1017, iwl100_bg_cfg)}, - {IWL_PCI_DEVICE(0x08AE, 0x1025, iwl100_bgn_cfg)}, - {IWL_PCI_DEVICE(0x08AE, 0x1027, iwl100_bg_cfg)}, - -/* 130 Series WiFi 
*/ - {IWL_PCI_DEVICE(0x0896, 0x5005, iwl130_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0896, 0x5007, iwl130_bg_cfg)}, - {IWL_PCI_DEVICE(0x0897, 0x5015, iwl130_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0897, 0x5017, iwl130_bg_cfg)}, - {IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)}, - -/* 2x00 Series */ - {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)}, - {IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)}, - {IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)}, - -/* 2x30 Series */ - {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)}, - {IWL_PCI_DEVICE(0x0887, 0x4066, iwl2030_2bg_cfg)}, - {IWL_PCI_DEVICE(0x0888, 0x4266, iwl2030_2bg_cfg)}, - {IWL_PCI_DEVICE(0x0887, 0x4466, iwl2030_2bg_cfg)}, - -/* 6x35 Series */ - {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)}, - {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)}, - {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)}, - {IWL_PCI_DEVICE(0x088E, 0x4064, iwl6035_2abg_cfg)}, - {IWL_PCI_DEVICE(0x088F, 0x4264, iwl6035_2abg_cfg)}, - {IWL_PCI_DEVICE(0x088E, 0x4464, iwl6035_2abg_cfg)}, - {IWL_PCI_DEVICE(0x088E, 0x4066, iwl6035_2bg_cfg)}, - {IWL_PCI_DEVICE(0x088F, 0x4266, iwl6035_2bg_cfg)}, - {IWL_PCI_DEVICE(0x088E, 0x4466, iwl6035_2bg_cfg)}, - -/* 105 Series */ - {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0894, 0x0026, iwl105_bg_cfg)}, - {IWL_PCI_DEVICE(0x0895, 0x0226, iwl105_bg_cfg)}, - {IWL_PCI_DEVICE(0x0894, 0x0426, iwl105_bg_cfg)}, - -/* 135 Series */ - {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)}, - {IWL_PCI_DEVICE(0x0892, 0x0066, iwl135_bg_cfg)}, - {IWL_PCI_DEVICE(0x0893, 0x0266, iwl135_bg_cfg)}, - {IWL_PCI_DEVICE(0x0892, 0x0466, iwl135_bg_cfg)}, - - {0} -}; -MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); - -static struct pci_driver iwl_driver = { - .name = DRV_NAME, - .id_table = iwl_hw_card_ids, - .probe = iwl_pci_probe, - .remove = __devexit_p(iwl_pci_remove), - .driver.pm = IWL_PM_OPS, -}; - static int __init iwl_init(void) { @@ -3936,12 +3880,10 @@ static int __init iwl_init(void) return ret; } - ret = pci_register_driver(&iwl_driver); - if (ret) { - pr_err("Unable to initialize PCI module\n"); - goto error_register; - } + ret = iwl_pci_register_driver(); + if (ret) + goto error_register; return ret; error_register: @@ -3951,7 +3893,7 @@ error_register: static void __exit iwl_exit(void) { - pci_unregister_driver(&iwl_driver); + iwl_pci_unregister_driver(); iwlagn_rate_control_unregister(); } @@ -3993,3 +3935,51 @@ MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])"); module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO); MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])"); + +module_param_named(wd_disable, iwlagn_mod_params.wd_disable, bool, S_IRUGO); +MODULE_PARM_DESC(wd_disable, + "Disable stuck queue watchdog timer (default: 0 [enabled])"); + +/* + * set bt_coex_active to true, uCode will do kill/defer + * every time the priority line is asserted (BT is sending signals on the + * priority line in the PCIx). 
+ * set bt_coex_active to false, uCode will ignore the BT activity and + * perform the normal operation + * + * User might experience transmit issue on some platform due to WiFi/BT + * co-exist problem. The possible behaviors are: + * Able to scan and finding all the available AP + * Not able to associate with any AP + * On those platforms, WiFi communication can be restored by set + * "bt_coex_active" module parameter to "false" + * + * default: bt_coex_active = true (BT_COEX_ENABLE) + */ +module_param_named(bt_coex_active, iwlagn_mod_params.bt_coex_active, + bool, S_IRUGO); +MODULE_PARM_DESC(bt_coex_active, "enable wifi/bt co-exist (default: enable)"); + +module_param_named(led_mode, iwlagn_mod_params.led_mode, int, S_IRUGO); +MODULE_PARM_DESC(led_mode, "0=system default, " + "1=On(RF On)/Off(RF Off), 2=blinking (default: 0)"); + +module_param_named(power_save, iwlagn_mod_params.power_save, + bool, S_IRUGO); +MODULE_PARM_DESC(power_save, + "enable WiFi power management (default: disable)"); + +module_param_named(power_level, iwlagn_mod_params.power_level, + int, S_IRUGO); +MODULE_PARM_DESC(power_level, + "default power save level (range from 1 - 5, default: 1)"); + +/* + * For now, keep using power level 1 instead of automatically + * adjusting ... + */ +module_param_named(no_sleep_autoadjust, iwlagn_mod_params.no_sleep_autoadjust, + bool, S_IRUGO); +MODULE_PARM_DESC(no_sleep_autoadjust, + "don't automatically adjust sleep level " + "according to maximum network latency (default: true)"); diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h index d1716844002..d941c4c98e4 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.h +++ b/drivers/net/wireless/iwlwifi/iwl-agn.h @@ -109,42 +109,25 @@ extern struct iwl_cfg iwl135_bg_cfg; extern struct iwl_cfg iwl135_bgn_cfg; extern struct iwl_mod_params iwlagn_mod_params; -extern struct iwl_hcmd_ops iwlagn_hcmd; -extern struct iwl_hcmd_ops iwlagn_bt_hcmd; -extern struct iwl_hcmd_utils_ops iwlagn_hcmd_utils; extern struct ieee80211_ops iwlagn_hw_ops; int iwl_reset_ict(struct iwl_priv *priv); -void iwl_disable_ict(struct iwl_priv *priv); -int iwl_alloc_isr_ict(struct iwl_priv *priv); -void iwl_free_isr_ict(struct iwl_priv *priv); -irqreturn_t iwl_isr_ict(int irq, void *data); -/* call this function to flush any scheduled tasklet */ -static inline void iwl_synchronize_irq(struct iwl_priv *priv) +static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd) { - /* wait to make sure we flush pending tasklet*/ - synchronize_irq(priv->pci_dev->irq); - tasklet_kill(&priv->irq_tasklet); + hdr->op_code = cmd; + hdr->first_group = 0; + hdr->groups_num = 1; + hdr->data_valid = 1; } -int iwl_prepare_card_hw(struct iwl_priv *priv); - -int iwlagn_start_device(struct iwl_priv *priv); -void iwlagn_stop_device(struct iwl_priv *priv); - /* tx queue */ -void iwlagn_set_wr_ptrs(struct iwl_priv *priv, - int txq_id, u32 index); -void iwlagn_tx_queue_set_status(struct iwl_priv *priv, - struct iwl_tx_queue *txq, - int tx_fifo_id, int scd_retry); -void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask); void iwl_free_tfds_in_queue(struct iwl_priv *priv, int sta_id, int tid, int freed); /* RXON */ +int iwlagn_set_pan_params(struct iwl_priv *priv); int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx); void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx); int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed); @@ -161,41 +144,29 @@ void iwlagn_send_prio_tbl(struct iwl_priv 
*priv); int iwlagn_run_init_ucode(struct iwl_priv *priv); int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv, struct fw_img *image, - int subtype, int alternate_subtype); + enum iwlagn_ucode_type ucode_type); /* lib */ void iwl_check_abort_status(struct iwl_priv *priv, u8 frame_count, u32 status); -void iwlagn_rx_handler_setup(struct iwl_priv *priv); -void iwlagn_setup_deferred_work(struct iwl_priv *priv); int iwlagn_hw_valid_rtc_data_addr(u32 addr); int iwlagn_send_tx_power(struct iwl_priv *priv); void iwlagn_temperature(struct iwl_priv *priv); u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv); -const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv, - size_t offset); -void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); -int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq); -int iwlagn_hw_nic_init(struct iwl_priv *priv); int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv); int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control); void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control); +int iwlagn_send_beacon_cmd(struct iwl_priv *priv); /* rx */ -void iwlagn_rx_queue_restock(struct iwl_priv *priv); -void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority); -void iwlagn_rx_replenish(struct iwl_priv *priv); -void iwlagn_rx_replenish_now(struct iwl_priv *priv); -void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq); -int iwlagn_rxq_stop(struct iwl_priv *priv); int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band); void iwl_setup_rx_handlers(struct iwl_priv *priv); +void iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); + /* tx */ -void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); -int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv, - struct iwl_tx_queue *txq, - dma_addr_t addr, u16 len, u8 reset); +void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq, + int index); void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags, struct ieee80211_tx_info *info); int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb); @@ -203,18 +174,12 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 *ssn); int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid); -void iwlagn_txq_agg_queue_setup(struct iwl_priv *priv, - struct ieee80211_sta *sta, - int tid, int frame_limit); int iwlagn_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id); void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); +void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index); -void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv); -int iwlagn_txq_ctx_alloc(struct iwl_priv *priv); -void iwlagn_txq_ctx_reset(struct iwl_priv *priv); -void iwlagn_txq_ctx_stop(struct iwl_priv *priv); static inline u32 iwl_tx_status_to_mac80211(u32 status) { @@ -249,10 +214,6 @@ void iwlagn_post_scan(struct iwl_priv *priv); int iwlagn_manage_ibss_station(struct iwl_priv *priv, struct ieee80211_vif *vif, bool add); -/* hcmd */ -int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant); -int iwlagn_send_beacon_cmd(struct iwl_priv *priv); - /* bt coex */ void iwlagn_send_advance_bt_config(struct iwl_priv *priv); void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv, @@ 
-260,6 +221,8 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv, void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv); void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv); void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv); +void iwlagn_bt_coex_rssi_monitor(struct iwl_priv *priv); +void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena); #ifdef CONFIG_IWLWIFI_DEBUG const char *iwl_get_tx_fail_reason(u32 status); @@ -283,11 +246,13 @@ int iwl_set_default_wep_key(struct iwl_priv *priv, int iwl_restore_default_wep_keys(struct iwl_priv *priv, struct iwl_rxon_context *ctx); int iwl_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - struct ieee80211_key_conf *key, u8 sta_id); + struct ieee80211_key_conf *key, + struct ieee80211_sta *sta); int iwl_remove_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx, - struct ieee80211_key_conf *key, u8 sta_id); + struct ieee80211_key_conf *key, + struct ieee80211_sta *sta); void iwl_update_tkip_key(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, + struct ieee80211_vif *vif, struct ieee80211_key_conf *keyconf, struct ieee80211_sta *sta, u32 iv32, u16 *phase1key); int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid); @@ -296,6 +261,8 @@ int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta, int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta, int tid); void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt); +int iwl_update_bcast_station(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); int iwl_update_bcast_stations(struct iwl_priv *priv); void iwlagn_mac_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -343,6 +310,9 @@ extern int iwl_alive_start(struct iwl_priv *priv); /* svtool */ #ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL extern int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len); +extern int iwl_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb, + struct netlink_callback *cb, + void *data, int len); extern void iwl_testmode_init(struct iwl_priv *priv); extern void iwl_testmode_cleanup(struct iwl_priv *priv); #else @@ -352,6 +322,13 @@ int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len) return -ENOSYS; } static inline +int iwl_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb, + struct netlink_callback *cb, + void *data, int len) +{ + return -ENOSYS; +} +static inline void iwl_testmode_init(struct iwl_priv *priv) { } diff --git a/drivers/net/wireless/iwlwifi/iwl-bus.h b/drivers/net/wireless/iwlwifi/iwl-bus.h new file mode 100644 index 00000000000..f3ee1c0c004 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-bus.h @@ -0,0 +1,139 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#ifndef __iwl_pci_h__ +#define __iwl_pci_h__ + +struct iwl_bus; + +/** + * struct iwl_bus_ops - bus specific operations + * @get_pm_support: must returns true if the bus can go to sleep + * @apm_config: will be called during the config of the APM configuration + * @set_drv_data: set the drv_data pointer to the bus layer + * @get_hw_id: prints the hw_id in the provided buffer + * @write8: write a byte to register at offset ofs + * @write32: write a dword to register at offset ofs + * @wread32: read a dword at register at offset ofs + */ +struct iwl_bus_ops { + bool (*get_pm_support)(struct iwl_bus *bus); + void (*apm_config)(struct iwl_bus *bus); + void (*set_drv_data)(struct iwl_bus *bus, void *drv_data); + void (*get_hw_id)(struct iwl_bus *bus, char buf[], int buf_len); + void (*write8)(struct iwl_bus *bus, u32 ofs, u8 val); + void (*write32)(struct iwl_bus *bus, u32 ofs, u32 val); + u32 (*read32)(struct iwl_bus *bus, u32 ofs); +}; + +struct iwl_bus { + /* Common data to all buses */ + void *drv_data; /* driver's context */ + struct device *dev; + struct iwl_bus_ops *ops; + + unsigned int irq; + + /* pointer to bus specific struct */ + /*Ensure that this pointer will always be aligned to sizeof pointer */ + char bus_specific[0] __attribute__((__aligned__(sizeof(void *)))); +}; + +static inline bool bus_get_pm_support(struct iwl_bus *bus) +{ + return bus->ops->get_pm_support(bus); +} + +static inline void bus_apm_config(struct iwl_bus *bus) +{ + bus->ops->apm_config(bus); +} + +static inline void bus_set_drv_data(struct iwl_bus *bus, void *drv_data) +{ + bus->ops->set_drv_data(bus, drv_data); +} + +static inline void bus_get_hw_id(struct iwl_bus *bus, char buf[], int buf_len) +{ + bus->ops->get_hw_id(bus, buf, buf_len); +} + +static inline void bus_write8(struct iwl_bus *bus, u32 ofs, u8 val) +{ + bus->ops->write8(bus, ofs, val); +} + +static inline void bus_write32(struct iwl_bus *bus, u32 ofs, u32 val) +{ + bus->ops->write32(bus, ofs, val); +} + +static inline u32 bus_read32(struct iwl_bus *bus, u32 ofs) +{ + return bus->ops->read32(bus, ofs); +} + +int __must_check iwl_pci_register_driver(void); +void iwl_pci_unregister_driver(void); + +#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h index 6ee5f1aa555..5769ca5cebc 100644 --- a/drivers/net/wireless/iwlwifi/iwl-commands.h +++ b/drivers/net/wireless/iwlwifi/iwl-commands.h @@ -188,6 +188,13 @@ enum { REPLY_WIPAN_NOA_NOTIFICATION = 0xbc, REPLY_WIPAN_DEACTIVATION_COMPLETE = 0xbd, + REPLY_WOWLAN_PATTERNS = 0xe0, + REPLY_WOWLAN_WAKEUP_FILTER = 0xe1, + REPLY_WOWLAN_TSC_RSC_PARAMS = 0xe2, + REPLY_WOWLAN_TKIP_PARAMS = 0xe3, + REPLY_WOWLAN_KEK_KCK_MATERIAL = 0xe4, + REPLY_WOWLAN_GET_STATUS = 0xe5, + REPLY_MAX = 0xff }; @@ -384,18 +391,6 @@ struct iwl_tx_ant_config_cmd { #define UCODE_VALID_OK cpu_to_le32(0x1) -enum iwlagn_ucode_subtype { - UCODE_SUBTYPE_REGULAR = 0, - UCODE_SUBTYPE_REGULAR_NEW = 1, - UCODE_SUBTYPE_INIT = 9, - - /* - * Not a valid subtype, the ucode has just a u8, so - * we can use something > 0xff for this value. - */ - UCODE_SUBTYPE_NONE_LOADED = 0x100, -}; - /** * REPLY_ALIVE = 0x1 (response only, not a command) * @@ -844,6 +839,8 @@ struct iwl_qosparam_cmd { #define STA_KEY_MULTICAST_MSK cpu_to_le16(0x4000) #define STA_KEY_MAX_NUM 8 #define STA_KEY_MAX_NUM_PAN 16 +/* must not match WEP_INVALID_OFFSET */ +#define IWLAGN_HW_KEY_DEFAULT 0xfe /* Flags indicate whether to modify vs. 
don't change various station params */ #define STA_MODIFY_KEY_MASK 0x01 @@ -984,15 +981,26 @@ struct iwl_rem_sta_cmd { u8 reserved2[2]; } __packed; -#define IWL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0)) -#define IWL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1)) -#define IWL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2)) -#define IWL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3)) + +/* WiFi queues mask */ +#define IWL_SCD_BK_MSK cpu_to_le32(BIT(0)) +#define IWL_SCD_BE_MSK cpu_to_le32(BIT(1)) +#define IWL_SCD_VI_MSK cpu_to_le32(BIT(2)) +#define IWL_SCD_VO_MSK cpu_to_le32(BIT(3)) +#define IWL_SCD_MGMT_MSK cpu_to_le32(BIT(3)) + +/* PAN queues mask */ +#define IWL_PAN_SCD_BK_MSK cpu_to_le32(BIT(4)) +#define IWL_PAN_SCD_BE_MSK cpu_to_le32(BIT(5)) +#define IWL_PAN_SCD_VI_MSK cpu_to_le32(BIT(6)) +#define IWL_PAN_SCD_VO_MSK cpu_to_le32(BIT(7)) +#define IWL_PAN_SCD_MGMT_MSK cpu_to_le32(BIT(7)) +#define IWL_PAN_SCD_MULTICAST_MSK cpu_to_le32(BIT(8)) + #define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00) #define IWL_DROP_SINGLE 0 -#define IWL_DROP_SELECTED 1 -#define IWL_DROP_ALL 2 +#define IWL_DROP_ALL (BIT(IWL_RXON_CTX_BSS) | BIT(IWL_RXON_CTX_PAN)) /* * REPLY_TXFIFO_FLUSH = 0x1e(command and response) @@ -1932,6 +1940,9 @@ struct iwl_bt_cmd { /* Disable Sync PSPoll on SCO/eSCO */ #define IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE BIT(7) +#define IWLAGN_BT_PSP_MIN_RSSI_THRESHOLD -75 /* dBm */ +#define IWLAGN_BT_PSP_MAX_RSSI_THRESHOLD -65 /* dBm */ + #define IWLAGN_BT_PRIO_BOOST_MAX 0xFF #define IWLAGN_BT_PRIO_BOOST_MIN 0x00 #define IWLAGN_BT_PRIO_BOOST_DEFAULT 0xF0 @@ -3153,7 +3164,6 @@ struct iwl_enhance_sensitivity_cmd { /* The default calibrate table size if not specified by firmware */ #define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18 enum { - IWL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7, IWL_PHY_CALIBRATE_DC_CMD = 8, IWL_PHY_CALIBRATE_LO_CMD = 9, IWL_PHY_CALIBRATE_TX_IQ_CMD = 11, @@ -3166,22 +3176,36 @@ enum { #define IWL_MAX_PHY_CALIBRATE_TBL_SIZE (253) -#define IWL_CALIB_INIT_CFG_ALL cpu_to_le32(0xffffffff) - /* This enum defines the bitmap of various calibrations to enable in both * init ucode and runtime ucode through CALIBRATION_CFG_CMD. 
*/ enum iwl_ucode_calib_cfg { - IWL_CALIB_CFG_RX_BB_IDX, - IWL_CALIB_CFG_DC_IDX, - IWL_CALIB_CFG_TX_IQ_IDX, - IWL_CALIB_CFG_RX_IQ_IDX, - IWL_CALIB_CFG_NOISE_IDX, - IWL_CALIB_CFG_CRYSTAL_IDX, - IWL_CALIB_CFG_TEMPERATURE_IDX, - IWL_CALIB_CFG_PAPD_IDX, + IWL_CALIB_CFG_RX_BB_IDX = BIT(0), + IWL_CALIB_CFG_DC_IDX = BIT(1), + IWL_CALIB_CFG_LO_IDX = BIT(2), + IWL_CALIB_CFG_TX_IQ_IDX = BIT(3), + IWL_CALIB_CFG_RX_IQ_IDX = BIT(4), + IWL_CALIB_CFG_NOISE_IDX = BIT(5), + IWL_CALIB_CFG_CRYSTAL_IDX = BIT(6), + IWL_CALIB_CFG_TEMPERATURE_IDX = BIT(7), + IWL_CALIB_CFG_PAPD_IDX = BIT(8), + IWL_CALIB_CFG_SENSITIVITY_IDX = BIT(9), + IWL_CALIB_CFG_TX_PWR_IDX = BIT(10), }; +#define IWL_CALIB_INIT_CFG_ALL cpu_to_le32(IWL_CALIB_CFG_RX_BB_IDX | \ + IWL_CALIB_CFG_DC_IDX | \ + IWL_CALIB_CFG_LO_IDX | \ + IWL_CALIB_CFG_TX_IQ_IDX | \ + IWL_CALIB_CFG_RX_IQ_IDX | \ + IWL_CALIB_CFG_NOISE_IDX | \ + IWL_CALIB_CFG_CRYSTAL_IDX | \ + IWL_CALIB_CFG_TEMPERATURE_IDX | \ + IWL_CALIB_CFG_PAPD_IDX | \ + IWL_CALIB_CFG_SENSITIVITY_IDX | \ + IWL_CALIB_CFG_TX_PWR_IDX) + +#define IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK cpu_to_le32(BIT(0)) struct iwl_calib_cfg_elmnt_s { __le32 is_enable; @@ -3215,15 +3239,6 @@ struct iwl_calib_cmd { u8 data[0]; } __packed; -/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */ -struct iwl_calib_diff_gain_cmd { - struct iwl_calib_hdr hdr; - s8 diff_gain_a; /* see above */ - s8 diff_gain_b; - s8 diff_gain_c; - u8 reserved1; -} __packed; - struct iwl_calib_xtal_freq_cmd { struct iwl_calib_hdr hdr; u8 cap_pin1; @@ -3231,11 +3246,11 @@ struct iwl_calib_xtal_freq_cmd { u8 pad[2]; } __packed; -#define DEFAULT_RADIO_SENSOR_OFFSET 2700 +#define DEFAULT_RADIO_SENSOR_OFFSET cpu_to_le16(2700) struct iwl_calib_temperature_offset_cmd { struct iwl_calib_hdr hdr; - s16 radio_sensor_offset; - s16 reserved; + __le16 radio_sensor_offset; + __le16 reserved; } __packed; /* IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */ @@ -3756,6 +3771,127 @@ struct iwl_bt_coex_prot_env_cmd { u8 reserved[2]; } __attribute__((packed)); +/* + * REPLY_WOWLAN_PATTERNS + */ +#define IWLAGN_WOWLAN_MIN_PATTERN_LEN 16 +#define IWLAGN_WOWLAN_MAX_PATTERN_LEN 128 + +struct iwlagn_wowlan_pattern { + u8 mask[IWLAGN_WOWLAN_MAX_PATTERN_LEN / 8]; + u8 pattern[IWLAGN_WOWLAN_MAX_PATTERN_LEN]; + u8 mask_size; + u8 pattern_size; + __le16 reserved; +} __packed; + +#define IWLAGN_WOWLAN_MAX_PATTERNS 20 + +struct iwlagn_wowlan_patterns_cmd { + __le32 n_patterns; + struct iwlagn_wowlan_pattern patterns[]; +} __packed; + +/* + * REPLY_WOWLAN_WAKEUP_FILTER + */ +enum iwlagn_wowlan_wakeup_filters { + IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET = BIT(0), + IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH = BIT(1), + IWLAGN_WOWLAN_WAKEUP_BEACON_MISS = BIT(2), + IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE = BIT(3), + IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL = BIT(4), + IWLAGN_WOWLAN_WAKEUP_RFKILL = BIT(5), + IWLAGN_WOWLAN_WAKEUP_UCODE_ERROR = BIT(6), + IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ = BIT(7), + IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE = BIT(8), + IWLAGN_WOWLAN_WAKEUP_ALWAYS = BIT(9), + IWLAGN_WOWLAN_WAKEUP_ENABLE_NET_DETECT = BIT(10), +}; + +struct iwlagn_wowlan_wakeup_filter_cmd { + __le32 enabled; + __le16 non_qos_seq; + u8 min_sleep_seconds; + u8 reserved; + __le16 qos_seq[8]; +}; + +/* + * REPLY_WOWLAN_TSC_RSC_PARAMS + */ +#define IWLAGN_NUM_RSC 16 + +struct tkip_sc { + __le16 iv16; + __le16 pad; + __le32 iv32; +} __packed; + +struct iwlagn_tkip_rsc_tsc { + struct tkip_sc unicast_rsc[IWLAGN_NUM_RSC]; + struct tkip_sc multicast_rsc[IWLAGN_NUM_RSC]; + struct tkip_sc tsc; +} __packed; + +struct aes_sc { + __le64 pn; +} 
__packed; + +struct iwlagn_aes_rsc_tsc { + struct aes_sc unicast_rsc[IWLAGN_NUM_RSC]; + struct aes_sc multicast_rsc[IWLAGN_NUM_RSC]; + struct aes_sc tsc; +} __packed; + +union iwlagn_all_tsc_rsc { + struct iwlagn_tkip_rsc_tsc tkip; + struct iwlagn_aes_rsc_tsc aes; +}; + +struct iwlagn_wowlan_rsc_tsc_params_cmd { + union iwlagn_all_tsc_rsc all_tsc_rsc; +} __packed; + +/* + * REPLY_WOWLAN_TKIP_PARAMS + */ +#define IWLAGN_MIC_KEY_SIZE 8 +#define IWLAGN_P1K_SIZE 5 +struct iwlagn_mic_keys { + u8 tx[IWLAGN_MIC_KEY_SIZE]; + u8 rx_unicast[IWLAGN_MIC_KEY_SIZE]; + u8 rx_mcast[IWLAGN_MIC_KEY_SIZE]; +} __packed; + +struct iwlagn_p1k_cache { + __le16 p1k[IWLAGN_P1K_SIZE]; +} __packed; + +#define IWLAGN_NUM_RX_P1K_CACHE 2 + +struct iwlagn_wowlan_tkip_params_cmd { + struct iwlagn_mic_keys mic_keys; + struct iwlagn_p1k_cache tx; + struct iwlagn_p1k_cache rx_uni[IWLAGN_NUM_RX_P1K_CACHE]; + struct iwlagn_p1k_cache rx_multi[IWLAGN_NUM_RX_P1K_CACHE]; +} __packed; + +/* + * REPLY_WOWLAN_KEK_KCK_MATERIAL + */ + +#define IWLAGN_KCK_MAX_SIZE 32 +#define IWLAGN_KEK_MAX_SIZE 32 + +struct iwlagn_wowlan_kek_kck_material_cmd { + u8 kck[IWLAGN_KCK_MAX_SIZE]; + u8 kek[IWLAGN_KEK_MAX_SIZE]; + __le16 kck_len; + __le16 kek_len; + __le64 replay_ctr; +} __packed; + /****************************************************************************** * (13) * Union of all expected notifications/responses: diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index 45cc51c9c93..cf376f62b2f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c @@ -42,27 +42,7 @@ #include "iwl-sta.h" #include "iwl-helpers.h" #include "iwl-agn.h" - - -/* - * set bt_coex_active to true, uCode will do kill/defer - * every time the priority line is asserted (BT is sending signals on the - * priority line in the PCIx). - * set bt_coex_active to false, uCode will ignore the BT activity and - * perform the normal operation - * - * User might experience transmit issue on some platform due to WiFi/BT - * co-exist problem. The possible behaviors are: - * Able to scan and finding all the available AP - * Not able to associate with any AP - * On those platforms, WiFi communication can be restored by set - * "bt_coex_active" module parameter to "false" - * - * default: bt_coex_active = true (BT_COEX_ENABLE) - */ -bool bt_coex_active = true; -module_param(bt_coex_active, bool, S_IRUGO); -MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist"); +#include "iwl-trans.h" u32 iwl_debug_level; @@ -164,7 +144,7 @@ int iwlcore_init_geos(struct iwl_priv *priv) sband->bitrates = &rates[IWL_FIRST_OFDM_RATE]; sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE; - if (priv->cfg->sku & IWL_SKU_N) + if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE) iwlcore_init_ht_hw_capab(priv, &sband->ht_cap, IEEE80211_BAND_5GHZ); @@ -174,7 +154,7 @@ int iwlcore_init_geos(struct iwl_priv *priv) sband->bitrates = rates; sband->n_bitrates = IWL_RATE_COUNT_LEGACY; - if (priv->cfg->sku & IWL_SKU_N) + if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE) iwlcore_init_ht_hw_capab(priv, &sband->ht_cap, IEEE80211_BAND_2GHZ); @@ -229,12 +209,12 @@ int iwlcore_init_geos(struct iwl_priv *priv) priv->tx_power_next = max_tx_power; if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) && - priv->cfg->sku & IWL_SKU_A) { + priv->cfg->sku & EEPROM_SKU_CAP_BAND_52GHZ) { + char buf[32]; + bus_get_hw_id(priv->bus, buf, sizeof(buf)); IWL_INFO(priv, "Incorrectly detected BG card as ABG. 
" - "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n", - priv->pci_dev->device, - priv->pci_dev->subsystem_device); - priv->cfg->sku &= ~IWL_SKU_A; + "Please send your %s to maintainer.\n", buf); + priv->cfg->sku &= ~EEPROM_SKU_CAP_BAND_52GHZ; } IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n", @@ -383,6 +363,8 @@ int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx) ctx->timing.beacon_interval = cpu_to_le16(beacon_int); } + ctx->beacon_int = beacon_int; + tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */ interval_tm = beacon_int * TIME_UNIT; rem = do_div(tsf, interval_tm); @@ -396,8 +378,8 @@ int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx) le32_to_cpu(ctx->timing.beacon_init_val), le16_to_cpu(ctx->timing.atim_window)); - return iwl_send_cmd_pdu(priv, ctx->rxon_timing_cmd, - sizeof(ctx->timing), &ctx->timing); + return trans_send_cmd_pdu(&priv->trans, ctx->rxon_timing_cmd, + CMD_SYNC, sizeof(ctx->timing), &ctx->timing); } void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx, @@ -547,19 +529,6 @@ int iwl_full_rxon_required(struct iwl_priv *priv, return 0; } -u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - /* - * Assign the lowest rate -- should really get this from - * the beacon skb from mac80211. - */ - if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) - return IWL_RATE_1M_PLCP; - else - return IWL_RATE_6M_PLCP; -} - static void _iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf, struct iwl_rxon_context *ctx) @@ -619,8 +588,7 @@ static void _iwl_set_rxon_ht(struct iwl_priv *priv, rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY; } - if (priv->cfg->ops->hcmd->set_rxon_chain) - priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); + iwlagn_set_rxon_chain(priv, ctx); IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X " "extension channel offset 0x%x\n", @@ -874,12 +842,12 @@ static void iwlagn_abort_notification_waits(struct iwl_priv *priv) unsigned long flags; struct iwl_notification_wait *wait_entry; - spin_lock_irqsave(&priv->_agn.notif_wait_lock, flags); - list_for_each_entry(wait_entry, &priv->_agn.notif_waits, list) + spin_lock_irqsave(&priv->notif_wait_lock, flags); + list_for_each_entry(wait_entry, &priv->notif_waits, list) wait_entry->aborted = true; - spin_unlock_irqrestore(&priv->_agn.notif_wait_lock, flags); + spin_unlock_irqrestore(&priv->notif_wait_lock, flags); - wake_up_all(&priv->_agn.notif_waitq); + wake_up_all(&priv->notif_waitq); } void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand) @@ -1018,8 +986,6 @@ void iwl_apm_stop(struct iwl_priv *priv) int iwl_apm_init(struct iwl_priv *priv) { int ret = 0; - u16 lctl; - IWL_DEBUG_INFO(priv, "Init card's basic functions\n"); /* @@ -1048,27 +1014,7 @@ int iwl_apm_init(struct iwl_priv *priv) iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); - /* - * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition. - * Check if BIOS (or OS) enabled L1-ASPM on this device. - * If so (likely), disable L0S, so device moves directly L0->L1; - * costs negligible amount of power savings. - * If not (unlikely), enable L0S, so there is at least some - * power savings, even without L1. - */ - lctl = iwl_pcie_link_ctl(priv); - if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) == - PCI_CFG_LINK_CTRL_VAL_L1_EN) { - /* L1-ASPM enabled; disable(!) 
L0S */ - iwl_set_bit(priv, CSR_GIO_REG, - CSR_GIO_REG_VAL_L0S_ENABLED); - IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n"); - } else { - /* L1-ASPM disabled; enable(!) L0S */ - iwl_clear_bit(priv, CSR_GIO_REG, - CSR_GIO_REG_VAL_L0S_ENABLED); - IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n"); - } + bus_apm_config(priv->bus); /* Configure analog phase-lock-loop before activating to D0A */ if (priv->cfg->base_params->pll_cfg_val) @@ -1127,9 +1073,6 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) if (priv->tx_power_user_lmt == tx_power && !force) return 0; - if (!priv->cfg->ops->lib->send_tx_power) - return -EOPNOTSUPP; - if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) { IWL_WARN(priv, "Requested user TXPOWER %d below lower limit %d.\n", @@ -1163,7 +1106,7 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) prev_tx_power = priv->tx_power_user_lmt; priv->tx_power_user_lmt = tx_power; - ret = priv->cfg->ops->lib->send_tx_power(priv); + ret = iwlagn_send_tx_power(priv); /* if fail to set tx_power, restore the orig. tx power */ if (ret) { @@ -1182,7 +1125,7 @@ void iwl_send_bt_config(struct iwl_priv *priv) .kill_cts_mask = 0, }; - if (!bt_coex_active) + if (!iwlagn_mod_params.bt_coex_active) bt_cmd.flags = BT_COEX_DISABLE; else bt_cmd.flags = BT_COEX_ENABLE; @@ -1191,8 +1134,8 @@ void iwl_send_bt_config(struct iwl_priv *priv) IWL_DEBUG_INFO(priv, "BT coex %s\n", (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active"); - if (iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, - sizeof(struct iwl_bt_cmd), &bt_cmd)) + if (trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG, + CMD_SYNC, sizeof(struct iwl_bt_cmd), &bt_cmd)) IWL_ERR(priv, "failed to send BT Coex Config\n"); } @@ -1204,11 +1147,13 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear) }; if (flags & CMD_ASYNC) - return iwl_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD, + return trans_send_cmd_pdu(&priv->trans, REPLY_STATISTICS_CMD, + CMD_ASYNC, sizeof(struct iwl_statistics_cmd), - &statistics_cmd, NULL); + &statistics_cmd); else - return iwl_send_cmd_pdu(priv, REPLY_STATISTICS_CMD, + return trans_send_cmd_pdu(&priv->trans, REPLY_STATISTICS_CMD, + CMD_SYNC, sizeof(struct iwl_statistics_cmd), &statistics_cmd); } @@ -1275,10 +1220,9 @@ static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx) { iwl_connection_init_rx_config(priv, ctx); - if (priv->cfg->ops->hcmd->set_rxon_chain) - priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); + iwlagn_set_rxon_chain(priv, ctx); - return iwlcore_commit_rxon(priv, ctx); + return iwlagn_commit_rxon(priv, ctx); } static int iwl_setup_interface(struct iwl_priv *priv, @@ -1431,26 +1375,6 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw, } -int iwl_alloc_txq_mem(struct iwl_priv *priv) -{ - if (!priv->txq) - priv->txq = kzalloc( - sizeof(struct iwl_tx_queue) * - priv->cfg->base_params->num_of_queues, - GFP_KERNEL); - if (!priv->txq) { - IWL_ERR(priv, "Not enough memory for txq\n"); - return -ENOMEM; - } - return 0; -} - -void iwl_free_txq_mem(struct iwl_priv *priv) -{ - kfree(priv->txq); - priv->txq = NULL; -} - #ifdef CONFIG_IWLWIFI_DEBUGFS #define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES) @@ -1912,7 +1836,7 @@ void iwl_setup_watchdog(struct iwl_priv *priv) { unsigned int timeout = priv->cfg->base_params->wd_timeout; - if (timeout) + if (timeout && !iwlagn_mod_params.wd_disable) mod_timer(&priv->watchdog, jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout))); else @@ -1973,35 +1897,28 @@ __le32 
iwl_add_beacon_time(struct iwl_priv *priv, u32 base, #ifdef CONFIG_PM -int iwl_pci_suspend(struct device *device) +int iwl_suspend(struct iwl_priv *priv) { - struct pci_dev *pdev = to_pci_dev(device); - struct iwl_priv *priv = pci_get_drvdata(pdev); - /* * This function is called when system goes into suspend state * mac80211 will call iwl_mac_stop() from the mac80211 suspend function * first but since iwl_mac_stop() has no knowledge of who the caller is, * it will not call apm_ops.stop() to stop the DMA operation. * Calling apm_ops.stop here to make sure we stop the DMA. + * + * But of course ... if we have configured WoWLAN then we did other + * things already :-) */ - iwl_apm_stop(priv); + if (!priv->wowlan) + iwl_apm_stop(priv); return 0; } -int iwl_pci_resume(struct device *device) +int iwl_resume(struct iwl_priv *priv) { - struct pci_dev *pdev = to_pci_dev(device); - struct iwl_priv *priv = pci_get_drvdata(pdev); bool hw_rfkill = false; - /* - * We disable the RETRY_TIMEOUT register (0x41) to keep - * PCI Tx retries from interfering with C3 CPU state. - */ - pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); - iwl_enable_interrupts(priv); if (!(iwl_read32(priv, CSR_GP_CNTRL) & @@ -2018,13 +1935,4 @@ int iwl_pci_resume(struct device *device) return 0; } -const struct dev_pm_ops iwl_pm_ops = { - .suspend = iwl_pci_suspend, - .resume = iwl_pci_resume, - .freeze = iwl_pci_suspend, - .thaw = iwl_pci_resume, - .poweroff = iwl_pci_suspend, - .restore = iwl_pci_resume, -}; - #endif /* CONFIG_PM */ diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h index a54d416ec34..3e6bb734dcb 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.h +++ b/drivers/net/wireless/iwlwifi/iwl-core.h @@ -76,95 +76,29 @@ struct iwl_cmd; #define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation" #define DRV_AUTHOR "<ilw@linux.intel.com>" -#define IWL_PCI_DEVICE(dev, subdev, cfg) \ - .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ - .subvendor = PCI_ANY_ID, .subdevice = (subdev), \ - .driver_data = (kernel_ulong_t)&(cfg) - #define TIME_UNIT 1024 -#define IWL_SKU_G 0x1 -#define IWL_SKU_A 0x2 -#define IWL_SKU_N 0x8 - #define IWL_CMD(x) case x: return #x -struct iwl_hcmd_ops { - int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx); - void (*set_rxon_chain)(struct iwl_priv *priv, - struct iwl_rxon_context *ctx); - int (*set_tx_ant)(struct iwl_priv *priv, u8 valid_tx_ant); - void (*send_bt_config)(struct iwl_priv *priv); - int (*set_pan_params)(struct iwl_priv *priv); -}; - -struct iwl_hcmd_utils_ops { - u16 (*build_addsta_hcmd)(const struct iwl_addsta_cmd *cmd, u8 *data); - void (*gain_computation)(struct iwl_priv *priv, - u32 *average_noise, - u16 min_average_noise_antennat_i, - u32 min_average_noise, - u8 default_chain); - void (*chain_noise_reset)(struct iwl_priv *priv); - void (*tx_cmd_protection)(struct iwl_priv *priv, - struct ieee80211_tx_info *info, - __le16 fc, __le32 *tx_flags); - int (*calc_rssi)(struct iwl_priv *priv, - struct iwl_rx_phy_res *rx_resp); - int (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif); - void (*post_scan)(struct iwl_priv *priv); -}; - -struct iwl_apm_ops { - int (*init)(struct iwl_priv *priv); - void (*config)(struct iwl_priv *priv); -}; - -struct iwl_temp_ops { - void (*temperature)(struct iwl_priv *priv); -}; - struct iwl_lib_ops { /* set hw dependent parameters */ int (*set_hw_params)(struct iwl_priv *priv); - /* setup Rx handler */ - void (*rx_handler_setup)(struct iwl_priv *priv); - /* setup 
deferred work */ - void (*setup_deferred_work)(struct iwl_priv *priv); + /* setup BT Rx handler */ + void (*bt_rx_handler_setup)(struct iwl_priv *priv); + /* setup BT related deferred work */ + void (*bt_setup_deferred_work)(struct iwl_priv *priv); /* cancel deferred work */ void (*cancel_deferred_work)(struct iwl_priv *priv); - /* check validity of rtc data address */ - int (*is_valid_rtc_data_addr)(u32 addr); int (*set_channel_switch)(struct iwl_priv *priv, struct ieee80211_channel_switch *ch_switch); - /* power management */ - struct iwl_apm_ops apm_ops; - - /* power */ - int (*send_tx_power) (struct iwl_priv *priv); - void (*update_chain_flags)(struct iwl_priv *priv); + /* device specific configuration */ + void (*nic_config)(struct iwl_priv *priv); /* eeprom operations (as defined in iwl-eeprom.h) */ struct iwl_eeprom_ops eeprom_ops; /* temperature */ - struct iwl_temp_ops temp_ops; - - int (*txfifo_flush)(struct iwl_priv *priv, u16 flush_control); - void (*dev_txfifo_flush)(struct iwl_priv *priv, u16 flush_control); - -}; - -/* NIC specific ops */ -struct iwl_nic_ops { - void (*additional_nic_config)(struct iwl_priv *priv); -}; - -struct iwl_ops { - const struct iwl_lib_ops *lib; - const struct iwl_hcmd_ops *hcmd; - const struct iwl_hcmd_utils_ops *utils; - const struct iwl_nic_ops *nic; + void (*temperature)(struct iwl_priv *priv); }; struct iwl_mod_params { @@ -176,6 +110,12 @@ struct iwl_mod_params { int restart_fw; /* def: 1 = restart firmware */ bool plcp_check; /* def: true = enable plcp health check */ bool ack_check; /* def: false = disable ack health check */ + bool wd_disable; /* def: false = enable stuck queue check */ + bool bt_coex_active; /* def: true = enable bt coex */ + int led_mode; /* def: 0 = system default */ + bool no_sleep_autoadjust; /* def: true = disable autoadjust */ + bool power_save; /* def: false = disable power save */ + int power_level; /* def: 1 = power level */ }; /* @@ -225,7 +165,7 @@ struct iwl_base_params { * @ampdu_factor: Maximum A-MPDU length factor * @ampdu_density: Minimum A-MPDU spacing * @bt_sco_disable: uCode should not response to BT in SCO/ESCO mode -*/ + */ struct iwl_bt_params { bool advanced_bt_coexist; u8 bt_init_traffic_load; @@ -238,19 +178,31 @@ struct iwl_bt_params { }; /* * @use_rts_for_aggregation: use rts/cts protection for HT traffic -*/ + */ struct iwl_ht_params { const bool ht_greenfield_support; /* if used set to true */ bool use_rts_for_aggregation; + enum ieee80211_smps_mode smps_mode; }; /** * struct iwl_cfg + * @name: Offical name of the device * @fw_name_pre: Firmware filename prefix. The api version and extension * (.ucode) will be added to filename before loading from disk. The * filename is constructed as fw_name_pre<api>.ucode. * @ucode_api_max: Highest version of uCode API supported by driver. * @ucode_api_min: Lowest version of uCode API supported by driver. 
+ * @valid_tx_ant: valid transmit antenna + * @valid_rx_ant: valid receive antenna + * @sku: sku information from EEPROM + * @eeprom_ver: EEPROM version + * @eeprom_calib_ver: EEPROM calibration version + * @lib: pointer to the lib ops + * @additional_nic_config: additional nic configuration + * @base_params: pointer to basic parameters + * @ht_params: pointer to ht parameters + * @bt_params: pointer to bt parameters * @pa_type: used by 6000 series only to identify the type of Power Amplifier * @need_dc_calib: need to perform init dc calibration * @need_temp_offset_calib: need to perform temperature offset calibration @@ -260,7 +212,6 @@ struct iwl_ht_params { * @rx_with_siso_diversity: 1x1 device with rx antenna diversity * @internal_wimax_coex: internal wifi/wimax combo device * @iq_invert: I/Q inversion - * @disable_otp_refresh: disable OTP refresh current limit * * We enable the driver to be backward compatible wrt API version. The * driver specifies which APIs it supports (with @ucode_api_max being the @@ -277,11 +228,7 @@ struct iwl_ht_params { * } * * The ideal usage of this infrastructure is to treat a new ucode API - * release as a new hardware revision. That is, through utilizing the - * iwl_hcmd_utils_ops etc. we accommodate different command structures - * and flows between hardware versions (4965/5000) as well as their API - * versions. - * + * release as a new hardware revision. */ struct iwl_cfg { /* params specific to an individual device within a device family */ @@ -291,10 +238,11 @@ struct iwl_cfg { const unsigned int ucode_api_min; u8 valid_tx_ant; u8 valid_rx_ant; - unsigned int sku; + u16 sku; u16 eeprom_ver; u16 eeprom_calib_ver; - const struct iwl_ops *ops; + const struct iwl_lib_ops *lib; + void (*additional_nic_config)(struct iwl_priv *priv); /* params not likely to change within a device family */ struct iwl_base_params *base_params; /* params likely to change within a device family */ @@ -309,7 +257,6 @@ struct iwl_cfg { const bool rx_with_siso_diversity; const bool internal_wimax_coex; const bool iq_invert; - const bool disable_otp_refresh; }; /*************************** @@ -346,9 +293,6 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw, int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum nl80211_iftype newtype, bool newp2p); -int iwl_alloc_txq_mem(struct iwl_priv *priv); -void iwl_free_txq_mem(struct iwl_priv *priv); - #ifdef CONFIG_IWLWIFI_DEBUGFS int iwl_alloc_traffic_mem(struct iwl_priv *priv); void iwl_free_traffic_mem(struct iwl_priv *priv); @@ -390,28 +334,8 @@ static inline void iwl_update_stats(struct iwl_priv *priv, bool is_tx, /***************************************************** * RX ******************************************************/ -void iwl_cmd_queue_free(struct iwl_priv *priv); -void iwl_cmd_queue_unmap(struct iwl_priv *priv); -int iwl_rx_queue_alloc(struct iwl_priv *priv); -void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, - struct iwl_rx_queue *q); -int iwl_rx_queue_space(const struct iwl_rx_queue *q); -void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); - void iwl_chswitch_done(struct iwl_priv *priv, bool is_success); -/* TX helpers */ - -/***************************************************** -* TX -******************************************************/ -void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); -int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, - int slots_num, u32 txq_id); -void 
iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, - int slots_num, u32 txq_id); -void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); -void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id); void iwl_setup_watchdog(struct iwl_priv *priv); /***************************************************** * TX power @@ -419,13 +343,6 @@ void iwl_setup_watchdog(struct iwl_priv *priv); int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force); /******************************************************************************* - * Rate - ******************************************************************************/ - -u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv, - struct iwl_rxon_context *ctx); - -/******************************************************************************* * Scanning ******************************************************************************/ void iwl_init_scan_params(struct iwl_priv *priv); @@ -469,51 +386,19 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv, *****************************************************/ const char *get_cmd_string(u8 cmd); -int __must_check iwl_send_cmd_sync(struct iwl_priv *priv, - struct iwl_host_cmd *cmd); -int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd); -int __must_check iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, - u16 len, const void *data); -int iwl_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len, - const void *data, - void (*callback)(struct iwl_priv *priv, - struct iwl_device_cmd *cmd, - struct iwl_rx_packet *pkt)); - -int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd); - - -/***************************************************** - * PCI * - *****************************************************/ - -static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv) -{ - int pos; - u16 pci_lnk_ctl; - pos = pci_find_capability(priv->pci_dev, PCI_CAP_ID_EXP); - pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl); - return pci_lnk_ctl; -} - void iwl_bg_watchdog(unsigned long data); u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval); __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base, u32 addon, u32 beacon_interval); #ifdef CONFIG_PM -int iwl_pci_suspend(struct device *device); -int iwl_pci_resume(struct device *device); -extern const struct dev_pm_ops iwl_pm_ops; - -#define IWL_PM_OPS (&iwl_pm_ops) - -#else /* !CONFIG_PM */ - -#define IWL_PM_OPS NULL - +int iwl_suspend(struct iwl_priv *priv); +int iwl_resume(struct iwl_priv *priv); #endif /* !CONFIG_PM */ +int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg); +void __devexit iwl_remove(struct iwl_priv * priv); + /***************************************************** * Error Handling Debugging ******************************************************/ @@ -613,11 +498,7 @@ void iwl_apm_stop(struct iwl_priv *priv); int iwl_apm_init(struct iwl_priv *priv); int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx); -static inline int iwlcore_commit_rxon(struct iwl_priv *priv, - struct iwl_rxon_context *ctx) -{ - return priv->cfg->ops->hcmd->commit_rxon(priv, ctx); -} + static inline const struct ieee80211_supported_band *iwl_get_hw_mode( struct iwl_priv *priv, enum ieee80211_band band) { @@ -630,7 +511,6 @@ static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv) priv->cfg->bt_params->advanced_bt_coexist; } -extern bool bt_coex_active; extern bool bt_siso_mode; diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h 
b/drivers/net/wireless/iwlwifi/iwl-csr.h index 5ab90ba7a02..d6dbb042304 100644 --- a/drivers/net/wireless/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/iwlwifi/iwl-csr.h @@ -351,6 +351,7 @@ #define CSR_UCODE_SW_BIT_RFKILL (0x00000002) #define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004) #define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008) +#define CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE (0x00000020) /* GP Driver */ #define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_MSK (0x00000003) diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h index 2824ccbcc1f..f9a407e40af 100644 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h @@ -32,10 +32,10 @@ struct iwl_priv; extern u32 iwl_debug_level; -#define IWL_ERR(p, f, a...) dev_err(&((p)->pci_dev->dev), f, ## a) -#define IWL_WARN(p, f, a...) dev_warn(&((p)->pci_dev->dev), f, ## a) -#define IWL_INFO(p, f, a...) dev_info(&((p)->pci_dev->dev), f, ## a) -#define IWL_CRIT(p, f, a...) dev_crit(&((p)->pci_dev->dev), f, ## a) +#define IWL_ERR(p, f, a...) dev_err(p->bus->dev, f, ## a) +#define IWL_WARN(p, f, a...) dev_warn(p->bus->dev, f, ## a) +#define IWL_INFO(p, f, a...) dev_info(p->bus->dev, f, ## a) +#define IWL_CRIT(p, f, a...) dev_crit(p->bus->dev, f, ## a) #define iwl_print_hex_error(priv, p, len) \ do { \ @@ -78,8 +78,6 @@ static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level, #ifdef CONFIG_IWLWIFI_DEBUGFS int iwl_dbgfs_register(struct iwl_priv *priv, const char *name); void iwl_dbgfs_unregister(struct iwl_priv *priv); -extern int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf, - int bufsz); #else static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) { @@ -125,13 +123,13 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv) /* 0x00000F00 - 0x00000100 */ #define IWL_DL_POWER (1 << 8) #define IWL_DL_TEMP (1 << 9) -#define IWL_DL_NOTIF (1 << 10) +/* reserved (1 << 10) */ #define IWL_DL_SCAN (1 << 11) /* 0x0000F000 - 0x00001000 */ #define IWL_DL_ASSOC (1 << 12) #define IWL_DL_DROP (1 << 13) -#define IWL_DL_TXPOWER (1 << 14) -#define IWL_DL_AP (1 << 15) +/* reserved (1 << 14) */ +#define IWL_DL_COEX (1 << 15) /* 0x000F0000 - 0x00010000 */ #define IWL_DL_FW (1 << 16) #define IWL_DL_RF_KILL (1 << 17) @@ -171,12 +169,10 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv) #define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a) #define IWL_DEBUG_DROP_LIMIT(p, f, a...) \ IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a) -#define IWL_DEBUG_AP(p, f, a...) IWL_DEBUG(p, IWL_DL_AP, f, ## a) -#define IWL_DEBUG_TXPOWER(p, f, a...) IWL_DEBUG(p, IWL_DL_TXPOWER, f, ## a) +#define IWL_DEBUG_COEX(p, f, a...) IWL_DEBUG(p, IWL_DL_COEX, f, ## a) #define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a) #define IWL_DEBUG_RATE_LIMIT(p, f, a...) \ IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a) -#define IWL_DEBUG_NOTIF(p, f, a...) IWL_DEBUG(p, IWL_DL_NOTIF, f, ## a) #define IWL_DEBUG_ASSOC(p, f, a...) \ IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a) #define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) 
\ diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c index 0e6a04b739a..ec1485b2d3f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c +++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c @@ -227,7 +227,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, /* default is to dump the entire data segment */ if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) { priv->dbgfs_sram_offset = 0x800000; - if (priv->ucode_type == UCODE_SUBTYPE_INIT) + if (priv->ucode_type == IWL_UCODE_INIT) priv->dbgfs_sram_len = priv->ucode_init.data.len; else priv->dbgfs_sram_len = priv->ucode_rt.data.len; @@ -322,6 +322,19 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file, return count; } +static ssize_t iwl_dbgfs_wowlan_sram_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + + if (!priv->wowlan_sram) + return -ENODATA; + + return simple_read_from_buffer(user_buf, count, ppos, + priv->wowlan_sram, + priv->ucode_wowlan.data.len); +} static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@ -856,6 +869,7 @@ static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file, } DEBUGFS_READ_WRITE_FILE_OPS(sram); +DEBUGFS_READ_FILE_OPS(wowlan_sram); DEBUGFS_READ_WRITE_FILE_OPS(log_event); DEBUGFS_READ_FILE_OPS(nvm); DEBUGFS_READ_FILE_OPS(stations); @@ -1915,121 +1929,121 @@ static ssize_t iwl_dbgfs_reply_tx_error_read(struct file *file, pos += scnprintf(buf + pos, bufsz - pos, "Statistics_TX_Error:\n"); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t\t%u\n", iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_DELAY), - priv->_agn.reply_tx_stats.pp_delay); + priv->reply_tx_stats.pp_delay); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_FEW_BYTES), - priv->_agn.reply_tx_stats.pp_few_bytes); + priv->reply_tx_stats.pp_few_bytes); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_BT_PRIO), - priv->_agn.reply_tx_stats.pp_bt_prio); + priv->reply_tx_stats.pp_bt_prio); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_QUIET_PERIOD), - priv->_agn.reply_tx_stats.pp_quiet_period); + priv->reply_tx_stats.pp_quiet_period); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_CALC_TTAK), - priv->_agn.reply_tx_stats.pp_calc_ttak); + priv->reply_tx_stats.pp_calc_ttak); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n", iwl_get_tx_fail_reason( TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY), - priv->_agn.reply_tx_stats.int_crossed_retry); + priv->reply_tx_stats.int_crossed_retry); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", iwl_get_tx_fail_reason(TX_STATUS_FAIL_SHORT_LIMIT), - priv->_agn.reply_tx_stats.short_limit); + priv->reply_tx_stats.short_limit); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", iwl_get_tx_fail_reason(TX_STATUS_FAIL_LONG_LIMIT), - priv->_agn.reply_tx_stats.long_limit); + priv->reply_tx_stats.long_limit); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", iwl_get_tx_fail_reason(TX_STATUS_FAIL_FIFO_UNDERRUN), - priv->_agn.reply_tx_stats.fifo_underrun); + priv->reply_tx_stats.fifo_underrun); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", iwl_get_tx_fail_reason(TX_STATUS_FAIL_DRAIN_FLOW), - priv->_agn.reply_tx_stats.drain_flow); + priv->reply_tx_stats.drain_flow); pos += scnprintf(buf + pos, 
bufsz - pos, "%s:\t\t\t%u\n", iwl_get_tx_fail_reason(TX_STATUS_FAIL_RFKILL_FLUSH), - priv->_agn.reply_tx_stats.rfkill_flush); + priv->reply_tx_stats.rfkill_flush); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", iwl_get_tx_fail_reason(TX_STATUS_FAIL_LIFE_EXPIRE), - priv->_agn.reply_tx_stats.life_expire); + priv->reply_tx_stats.life_expire); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", iwl_get_tx_fail_reason(TX_STATUS_FAIL_DEST_PS), - priv->_agn.reply_tx_stats.dest_ps); + priv->reply_tx_stats.dest_ps); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", iwl_get_tx_fail_reason(TX_STATUS_FAIL_HOST_ABORTED), - priv->_agn.reply_tx_stats.host_abort); + priv->reply_tx_stats.host_abort); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", iwl_get_tx_fail_reason(TX_STATUS_FAIL_BT_RETRY), - priv->_agn.reply_tx_stats.pp_delay); + priv->reply_tx_stats.pp_delay); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", iwl_get_tx_fail_reason(TX_STATUS_FAIL_STA_INVALID), - priv->_agn.reply_tx_stats.sta_invalid); + priv->reply_tx_stats.sta_invalid); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", iwl_get_tx_fail_reason(TX_STATUS_FAIL_FRAG_DROPPED), - priv->_agn.reply_tx_stats.frag_drop); + priv->reply_tx_stats.frag_drop); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", iwl_get_tx_fail_reason(TX_STATUS_FAIL_TID_DISABLE), - priv->_agn.reply_tx_stats.tid_disable); + priv->reply_tx_stats.tid_disable); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", iwl_get_tx_fail_reason(TX_STATUS_FAIL_FIFO_FLUSHED), - priv->_agn.reply_tx_stats.fifo_flush); + priv->reply_tx_stats.fifo_flush); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n", iwl_get_tx_fail_reason( TX_STATUS_FAIL_INSUFFICIENT_CF_POLL), - priv->_agn.reply_tx_stats.insuff_cf_poll); + priv->reply_tx_stats.insuff_cf_poll); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", iwl_get_tx_fail_reason(TX_STATUS_FAIL_PASSIVE_NO_RX), - priv->_agn.reply_tx_stats.fail_hw_drop); + priv->reply_tx_stats.fail_hw_drop); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n", iwl_get_tx_fail_reason( TX_STATUS_FAIL_NO_BEACON_ON_RADAR), - priv->_agn.reply_tx_stats.sta_color_mismatch); + priv->reply_tx_stats.sta_color_mismatch); pos += scnprintf(buf + pos, bufsz - pos, "UNKNOWN:\t\t\t%u\n", - priv->_agn.reply_tx_stats.unknown); + priv->reply_tx_stats.unknown); pos += scnprintf(buf + pos, bufsz - pos, "\nStatistics_Agg_TX_Error:\n"); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", iwl_get_agg_tx_fail_reason(AGG_TX_STATE_UNDERRUN_MSK), - priv->_agn.reply_agg_tx_stats.underrun); + priv->reply_agg_tx_stats.underrun); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", iwl_get_agg_tx_fail_reason(AGG_TX_STATE_BT_PRIO_MSK), - priv->_agn.reply_agg_tx_stats.bt_prio); + priv->reply_agg_tx_stats.bt_prio); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", iwl_get_agg_tx_fail_reason(AGG_TX_STATE_FEW_BYTES_MSK), - priv->_agn.reply_agg_tx_stats.few_bytes); + priv->reply_agg_tx_stats.few_bytes); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", iwl_get_agg_tx_fail_reason(AGG_TX_STATE_ABORT_MSK), - priv->_agn.reply_agg_tx_stats.abort); + priv->reply_agg_tx_stats.abort); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n", iwl_get_agg_tx_fail_reason( AGG_TX_STATE_LAST_SENT_TTL_MSK), - priv->_agn.reply_agg_tx_stats.last_sent_ttl); + priv->reply_agg_tx_stats.last_sent_ttl); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n", iwl_get_agg_tx_fail_reason( 
AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK), - priv->_agn.reply_agg_tx_stats.last_sent_try); + priv->reply_agg_tx_stats.last_sent_try); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n", iwl_get_agg_tx_fail_reason( AGG_TX_STATE_LAST_SENT_BT_KILL_MSK), - priv->_agn.reply_agg_tx_stats.last_sent_bt_kill); + priv->reply_agg_tx_stats.last_sent_bt_kill); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", iwl_get_agg_tx_fail_reason(AGG_TX_STATE_SCD_QUERY_MSK), - priv->_agn.reply_agg_tx_stats.scd_query); + priv->reply_agg_tx_stats.scd_query); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n", iwl_get_agg_tx_fail_reason( AGG_TX_STATE_TEST_BAD_CRC32_MSK), - priv->_agn.reply_agg_tx_stats.bad_crc32); + priv->reply_agg_tx_stats.bad_crc32); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", iwl_get_agg_tx_fail_reason(AGG_TX_STATE_RESPONSE_MSK), - priv->_agn.reply_agg_tx_stats.response); + priv->reply_agg_tx_stats.response); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", iwl_get_agg_tx_fail_reason(AGG_TX_STATE_DUMP_TX_MSK), - priv->_agn.reply_agg_tx_stats.dump_tx); + priv->reply_agg_tx_stats.dump_tx); pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n", iwl_get_agg_tx_fail_reason(AGG_TX_STATE_DELAY_TX_MSK), - priv->_agn.reply_agg_tx_stats.delay_tx); + priv->reply_agg_tx_stats.delay_tx); pos += scnprintf(buf + pos, bufsz - pos, "UNKNOWN:\t\t\t%u\n", - priv->_agn.reply_agg_tx_stats.unknown); + priv->reply_agg_tx_stats.unknown); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); @@ -2493,7 +2507,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file, if (iwl_is_rfkill(priv)) return -EFAULT; - priv->cfg->ops->lib->dev_txfifo_flush(priv, IWL_DROP_ALL); + iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL); return count; } @@ -2667,6 +2681,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR); DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(wowlan_sram, dir_data, S_IRUSR); DEBUGFS_ADD_FILE(log_event, dir_data, S_IWUSR | S_IRUSR); DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR); DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR); @@ -2693,8 +2708,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR); DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR); DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR); - if (priv->cfg->ops->lib->dev_txfifo_flush) - DEBUGFS_ADD_FILE(txfifo_flush, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(txfifo_flush, dir_debug, S_IWUSR); DEBUGFS_ADD_FILE(protection_mode, dir_debug, S_IWUSR | S_IRUSR); DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR); diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h index c8de236c141..6c9790cac8d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h @@ -31,6 +31,7 @@ #ifndef __iwl_dev_h__ #define __iwl_dev_h__ +#include <linux/interrupt.h> #include <linux/pci.h> /* for struct pci_device_id */ #include <linux/kernel.h> #include <linux/wait.h> @@ -47,6 +48,10 @@ #include "iwl-power.h" #include "iwl-agn-rs.h" #include "iwl-agn-tt.h" +#include "iwl-bus.h" +#include "iwl-trans.h" + +#define DRV_NAME "iwlagn" struct iwl_tx_queue; @@ -257,11 +262,9 @@ struct iwl_channel_info { enum { CMD_SYNC = 0, - CMD_SIZE_NORMAL = 0, - CMD_NO_SKB = 0, - CMD_ASYNC = (1 << 1), - CMD_WANT_SKB = (1 << 2), - CMD_MAPPED = (1 << 3), + CMD_ASYNC = BIT(0), + CMD_WANT_SKB = BIT(1), + CMD_ON_DEMAND = 
BIT(2), }; #define DEF_CMD_PAYLOAD_SIZE 320 @@ -294,6 +297,16 @@ enum iwl_hcmd_dataflag { IWL_HCMD_DFL_NOCOPY = BIT(0), }; +/** + * struct iwl_host_cmd - Host command to the uCode + * @data: array of chunks that compose the data of the host command + * @reply_page: pointer to the page that holds the response to the host command + * @callback: + * @flags: can be CMD_* note CMD_WANT_SKB is incompatible with CMD_ASYNC + * @len: array of the lengths of the chunks in data + * @dataflags: + * @id: id of the host command + */ struct iwl_host_cmd { const void *data[IWL_MAX_CMD_TFDS]; unsigned long reply_page; @@ -385,13 +398,6 @@ struct iwl_tid_data { struct iwl_ht_agg agg; }; -struct iwl_hw_key { - u32 cipher; - int keylen; - u8 keyidx; - u8 key[32]; -}; - union iwl_ht_rate_supp { u16 rates; struct { @@ -444,7 +450,6 @@ struct iwl_station_entry { struct iwl_addsta_cmd sta; struct iwl_tid_data tid[MAX_TID_COUNT]; u8 used, ctxid; - struct iwl_hw_key keyinfo; struct iwl_link_quality_cmd *lq; }; @@ -547,7 +552,8 @@ enum iwl_ucode_tlv_type { IWL_UCODE_TLV_INIT_ERRLOG_PTR = 13, IWL_UCODE_TLV_ENHANCE_SENS_TBL = 14, IWL_UCODE_TLV_PHY_CALIBRATION_SIZE = 15, - /* 16 and 17 reserved for future use */ + IWL_UCODE_TLV_WOWLAN_INST = 16, + IWL_UCODE_TLV_WOWLAN_DATA = 17, IWL_UCODE_TLV_FLAGS = 18, }; @@ -631,7 +637,6 @@ struct iwl_sensitivity_ranges { /** * struct iwl_hw_params * @max_txq_num: Max # Tx queues supported - * @dma_chnl_num: Number of Tx DMA/FIFO channels * @scd_bc_tbls_size: size of scheduler byte count tables * @tfd_size: TFD size * @tx/rx_chains_num: Number of TX/RX chains @@ -653,7 +658,6 @@ struct iwl_sensitivity_ranges { */ struct iwl_hw_params { u8 max_txq_num; - u8 dma_chnl_num; u16 scd_bc_tbls_size; u32 tfd_size; u8 tx_chains_num; @@ -663,7 +667,6 @@ struct iwl_hw_params { u16 max_rxq_size; u16 max_rxq_log; u32 rx_page_order; - u32 rx_wrt_ptr_reg; u8 max_stations; u8 ht40_channel; u8 max_beacon_itrvl; /* in 1024 ms */ @@ -694,8 +697,6 @@ ****************************************************************************/ extern void iwl_update_chain_flags(struct iwl_priv *priv); extern const u8 iwl_bcast_addr[ETH_ALEN]; -extern int iwl_rxq_stop(struct iwl_priv *priv); -extern void iwl_txq_ctx_stop(struct iwl_priv *priv); extern int iwl_queue_space(const struct iwl_queue *q); static inline int iwl_queue_used(const struct iwl_queue *q, int i) { @@ -1152,6 +1153,8 @@ struct iwl_rxon_context { __le32 station_flags; + int beacon_int; + struct { bool non_gf_sta_present; u8 protection; @@ -1168,14 +1171,29 @@ enum iwl_scan_type { IWL_SCAN_OFFCH_TX, }; +enum iwlagn_ucode_type { + IWL_UCODE_NONE, + IWL_UCODE_REGULAR, + IWL_UCODE_INIT, + IWL_UCODE_WOWLAN, +}; + #ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL struct iwl_testmode_trace { + u32 buff_size; + u32 total_size; + u32 num_chunks; u8 *cpu_addr; u8 *trace_addr; dma_addr_t dma_addr; bool trace_enabled; }; #endif + +/* uCode ownership */ +#define IWL_OWNERSHIP_DRIVER 0 +#define IWL_OWNERSHIP_TM 1 + struct iwl_priv { /* ieee device used by generic ieee processing code */ @@ -1243,11 +1261,8 @@ struct iwl_priv { spinlock_t reg_lock; /* protect hw register access */ struct mutex mutex; - /* basic pci-network driver stuff */ - struct pci_dev *pci_dev; - - /* pci hardware address support */ - void __iomem *hw_base; + struct iwl_bus *bus; /* bus specific data */ + struct iwl_trans trans; /* microcode/device supports multiple contexts */ u8 valid_contexts; @@ -1267,10 +1282,15 @@ struct iwl_priv { int fw_index; /* firmware we're trying to load */ 
u32 ucode_ver; /* version of ucode, copy of iwl_ucode.ver */ + + /* uCode owner: default: IWL_OWNERSHIP_DRIVER */ + u8 ucode_owner; + struct fw_img ucode_rt; struct fw_img ucode_init; + struct fw_img ucode_wowlan; - enum iwlagn_ucode_subtype ucode_type; + enum iwlagn_ucode_type ucode_type; u8 ucode_write_complete; /* the image write is complete */ char firmware_name[25]; @@ -1341,6 +1361,8 @@ struct iwl_priv { u8 mac80211_registered; + bool wowlan; + /* eeprom -- this is in the card's little endian byte order */ u8 *eeprom; int nvm_device_type; @@ -1376,56 +1398,54 @@ struct iwl_priv { } accum_stats, delta_stats, max_delta_stats; #endif - struct { - /* INT ICT Table */ - __le32 *ict_tbl; - void *ict_tbl_vir; - dma_addr_t ict_tbl_dma; - dma_addr_t aligned_ict_tbl_dma; - int ict_index; - u32 inta; - bool use_ict; - /* - * reporting the number of tids has AGG on. 0 means - * no AGGREGATION - */ - u8 agg_tids_count; - - struct iwl_rx_phy_res last_phy_res; - bool last_phy_res_valid; - - struct completion firmware_loading_complete; - - u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr; - u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr; - - /* - * chain noise reset and gain commands are the - * two extra calibration commands follows the standard - * phy calibration commands - */ - u8 phy_calib_chain_noise_reset_cmd; - u8 phy_calib_chain_noise_gain_cmd; - - /* counts reply_tx error */ - struct reply_tx_error_statistics reply_tx_stats; - struct reply_agg_tx_error_statistics reply_agg_tx_stats; - /* notification wait support */ - struct list_head notif_waits; - spinlock_t notif_wait_lock; - wait_queue_head_t notif_waitq; - - /* remain-on-channel offload support */ - struct ieee80211_channel *hw_roc_channel; - struct delayed_work hw_roc_work; - enum nl80211_channel_type hw_roc_chantype; - int hw_roc_duration; - bool hw_roc_setup; - - struct sk_buff *offchan_tx_skb; - int offchan_tx_timeout; - struct ieee80211_channel *offchan_tx_chan; - } _agn; + /* INT ICT Table */ + __le32 *ict_tbl; + void *ict_tbl_vir; + dma_addr_t ict_tbl_dma; + dma_addr_t aligned_ict_tbl_dma; + int ict_index; + u32 inta; + bool use_ict; + /* + * reporting the number of tids has AGG on. 
0 means + * no AGGREGATION + */ + u8 agg_tids_count; + + struct iwl_rx_phy_res last_phy_res; + bool last_phy_res_valid; + + struct completion firmware_loading_complete; + + u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr; + u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr; + + /* + * chain noise reset and gain commands are the + * two extra calibration commands follows the standard + * phy calibration commands + */ + u8 phy_calib_chain_noise_reset_cmd; + u8 phy_calib_chain_noise_gain_cmd; + + /* counts reply_tx error */ + struct reply_tx_error_statistics reply_tx_stats; + struct reply_agg_tx_error_statistics reply_agg_tx_stats; + /* notification wait support */ + struct list_head notif_waits; + spinlock_t notif_wait_lock; + wait_queue_head_t notif_waitq; + + /* remain-on-channel offload support */ + struct ieee80211_channel *hw_roc_channel; + struct delayed_work hw_roc_work; + enum nl80211_channel_type hw_roc_chantype; + int hw_roc_duration; + bool hw_roc_setup; + + struct sk_buff *offchan_tx_skb; + int offchan_tx_timeout; + struct ieee80211_channel *offchan_tx_chan; /* bt coex */ u8 bt_enable_flag; @@ -1442,6 +1462,9 @@ struct iwl_priv { u16 dynamic_frag_thresh; u8 bt_ci_compliance; struct work_struct bt_traffic_change_work; + bool bt_enable_pspoll; + struct iwl_rxon_context *cur_rssi_ctx; + bool bt_is_sco; struct iwl_hw_params hw_params; @@ -1492,6 +1515,7 @@ struct iwl_priv { struct dentry *debugfs_dir; u32 dbgfs_sram_offset, dbgfs_sram_len; bool disable_ht40; + void *wowlan_sram; #endif /* CONFIG_IWLWIFI_DEBUGFS */ struct work_struct txpower_work; @@ -1509,9 +1533,14 @@ struct iwl_priv { bool led_registered; #ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL struct iwl_testmode_trace testmode_trace; + u32 tm_fixed_rate; #endif - u32 dbg_fixed_rate; + /* WoWLAN GTK rekey data */ + u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN]; + __le64 replay_ctr; + __le16 last_seq_ctl; + bool have_rekey_data; }; /*iwl_priv */ static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id) diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c index 47a56bc1cd1..19d31a5e32e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c @@ -407,11 +407,6 @@ static int iwl_find_otp_image(struct iwl_priv *priv, return -EINVAL; } -const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset) -{ - return priv->cfg->ops->lib->eeprom_ops.query_addr(priv, offset); -} - u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset) { if (!priv->eeprom) @@ -449,7 +444,7 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev) } e = (__le16 *)priv->eeprom; - priv->cfg->ops->lib->apm_ops.init(priv); + iwl_apm_init(priv); ret = iwl_eeprom_verify_signature(priv); if (ret < 0) { @@ -548,7 +543,7 @@ static void iwl_init_band_reference(const struct iwl_priv *priv, const struct iwl_eeprom_channel **eeprom_ch_info, const u8 **eeprom_ch_index) { - u32 offset = priv->cfg->ops->lib-> + u32 offset = priv->cfg->lib-> eeprom_ops.regulatory_bands[eep_band - 1]; switch (eep_band) { case 1: /* 2.4GHz band */ @@ -754,9 +749,9 @@ int iwl_init_channel_map(struct iwl_priv *priv) } /* Check if we do have HT40 channels */ - if (priv->cfg->ops->lib->eeprom_ops.regulatory_bands[5] == + if (priv->cfg->lib->eeprom_ops.regulatory_bands[5] == EEPROM_REGULATORY_BAND_NO_HT40 && - priv->cfg->ops->lib->eeprom_ops.regulatory_bands[6] == + priv->cfg->lib->eeprom_ops.regulatory_bands[6] == EEPROM_REGULATORY_BAND_NO_HT40) return 0; @@ 
-792,8 +787,8 @@ int iwl_init_channel_map(struct iwl_priv *priv) * driver need to process addition information * to determine the max channel tx power limits */ - if (priv->cfg->ops->lib->eeprom_ops.update_enhanced_txpower) - priv->cfg->ops->lib->eeprom_ops.update_enhanced_txpower(priv); + if (priv->cfg->lib->eeprom_ops.update_enhanced_txpower) + priv->cfg->lib->eeprom_ops.update_enhanced_txpower(priv); return 0; } @@ -834,3 +829,28 @@ const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv, return NULL; } + +void iwl_rf_config(struct iwl_priv *priv) +{ + u16 radio_cfg; + + radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); + + /* write radio config values to register */ + if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) { + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + EEPROM_RF_CFG_TYPE_MSK(radio_cfg) | + EEPROM_RF_CFG_STEP_MSK(radio_cfg) | + EEPROM_RF_CFG_DASH_MSK(radio_cfg)); + IWL_INFO(priv, "Radio type=0x%x-0x%x-0x%x\n", + EEPROM_RF_CFG_TYPE_MSK(radio_cfg), + EEPROM_RF_CFG_STEP_MSK(radio_cfg), + EEPROM_RF_CFG_DASH_MSK(radio_cfg)); + } else + WARN_ON(1); + + /* set CSR_HW_CONFIG_REG for uCode use */ + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | + CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); +} diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h index c960c6fa009..e4bf8ac5e64 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h @@ -110,12 +110,10 @@ enum { }; /* SKU Capabilities */ -/* 5000 and up */ -#define EEPROM_SKU_CAP_BAND_POS (4) -#define EEPROM_SKU_CAP_BAND_SELECTION \ - (3 << EEPROM_SKU_CAP_BAND_POS) +#define EEPROM_SKU_CAP_BAND_24GHZ (1 << 4) +#define EEPROM_SKU_CAP_BAND_52GHZ (1 << 5) #define EEPROM_SKU_CAP_11N_ENABLE (1 << 6) -#define EEPROM_SKU_CAP_AMT_ENABLE (1 << 7) +#define EEPROM_SKU_CAP_AMT_ENABLE (1 << 7) #define EEPROM_SKU_CAP_IPAN_ENABLE (1 << 8) /* *regulatory* channel data format in eeprom, one for each channel. 
@@ -164,16 +162,12 @@ struct iwl_eeprom_enhanced_txpwr { s8 mimo3_max; } __packed; -/* 5000 Specific */ -#define EEPROM_5000_TX_POWER_VERSION (4) -#define EEPROM_5000_EEPROM_VERSION (0x11A) - -/* 5000 and up calibration */ +/* calibration */ #define EEPROM_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION) #define EEPROM_XTAL ((2*0x128) | EEPROM_CALIB_ALL) -/* 5000 temperature */ -#define EEPROM_5000_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL) +/* temperature */ +#define EEPROM_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL) /* agn links */ #define EEPROM_LINK_HOST (2*0x64) @@ -205,6 +199,10 @@ struct iwl_eeprom_enhanced_txpwr { #define EEPROM_6000_REG_BAND_24_HT40_CHANNELS ((0x80)\ | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */ +/* 5000 Specific */ +#define EEPROM_5000_TX_POWER_VERSION (4) +#define EEPROM_5000_EEPROM_VERSION (0x11A) + /* 5050 Specific */ #define EEPROM_5050_TX_POWER_VERSION (4) #define EEPROM_5050_EEPROM_VERSION (0x21E) @@ -270,13 +268,13 @@ extern const u8 iwl_eeprom_band_1[14]; /* General */ #define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */ +#define EEPROM_SUBSYSTEM_ID (2*0x0A) /* 2 bytes */ #define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */ #define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */ #define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */ #define EEPROM_VERSION (2*0x44) /* 2 bytes */ #define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */ #define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */ -#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */ #define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */ #define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */ @@ -294,7 +292,6 @@ extern const u8 iwl_eeprom_band_1[14]; struct iwl_eeprom_ops { const u32 regulatory_bands[7]; - const u8* (*query_addr) (const struct iwl_priv *priv, size_t offset); void (*update_enhanced_txpower) (struct iwl_priv *priv); }; @@ -311,5 +308,6 @@ void iwl_free_channel_map(struct iwl_priv *priv); const struct iwl_channel_info *iwl_get_channel_info( const struct iwl_priv *priv, enum ieee80211_band band, u16 channel); +void iwl_rf_config(struct iwl_priv *priv); #endif /* __iwl_eeprom_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h index 6dfa806aefe..0ad60b3c04d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/iwlwifi/iwl-fh.h @@ -326,7 +326,7 @@ #define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60) /* Find Control/Status reg for given Tx DMA/FIFO channel */ -#define FH50_TCSR_CHNL_NUM (8) +#define FH_TCSR_CHNL_NUM (8) /* TCSR: tx_config register values */ #define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \ diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c deleted file mode 100644 index 76f99662314..00000000000 --- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c +++ /dev/null @@ -1,291 +0,0 @@ -/****************************************************************************** - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - *****************************************************************************/ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/sched.h> -#include <net/mac80211.h> - -#include "iwl-dev.h" /* FIXME: remove */ -#include "iwl-debug.h" -#include "iwl-eeprom.h" -#include "iwl-core.h" - - -const char *get_cmd_string(u8 cmd) -{ - switch (cmd) { - IWL_CMD(REPLY_ALIVE); - IWL_CMD(REPLY_ERROR); - IWL_CMD(REPLY_RXON); - IWL_CMD(REPLY_RXON_ASSOC); - IWL_CMD(REPLY_QOS_PARAM); - IWL_CMD(REPLY_RXON_TIMING); - IWL_CMD(REPLY_ADD_STA); - IWL_CMD(REPLY_REMOVE_STA); - IWL_CMD(REPLY_REMOVE_ALL_STA); - IWL_CMD(REPLY_TXFIFO_FLUSH); - IWL_CMD(REPLY_WEPKEY); - IWL_CMD(REPLY_TX); - IWL_CMD(REPLY_LEDS_CMD); - IWL_CMD(REPLY_TX_LINK_QUALITY_CMD); - IWL_CMD(COEX_PRIORITY_TABLE_CMD); - IWL_CMD(COEX_MEDIUM_NOTIFICATION); - IWL_CMD(COEX_EVENT_CMD); - IWL_CMD(REPLY_QUIET_CMD); - IWL_CMD(REPLY_CHANNEL_SWITCH); - IWL_CMD(CHANNEL_SWITCH_NOTIFICATION); - IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD); - IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION); - IWL_CMD(POWER_TABLE_CMD); - IWL_CMD(PM_SLEEP_NOTIFICATION); - IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC); - IWL_CMD(REPLY_SCAN_CMD); - IWL_CMD(REPLY_SCAN_ABORT_CMD); - IWL_CMD(SCAN_START_NOTIFICATION); - IWL_CMD(SCAN_RESULTS_NOTIFICATION); - IWL_CMD(SCAN_COMPLETE_NOTIFICATION); - IWL_CMD(BEACON_NOTIFICATION); - IWL_CMD(REPLY_TX_BEACON); - IWL_CMD(WHO_IS_AWAKE_NOTIFICATION); - IWL_CMD(QUIET_NOTIFICATION); - IWL_CMD(REPLY_TX_PWR_TABLE_CMD); - IWL_CMD(MEASURE_ABORT_NOTIFICATION); - IWL_CMD(REPLY_BT_CONFIG); - IWL_CMD(REPLY_STATISTICS_CMD); - IWL_CMD(STATISTICS_NOTIFICATION); - IWL_CMD(REPLY_CARD_STATE_CMD); - IWL_CMD(CARD_STATE_NOTIFICATION); - IWL_CMD(MISSED_BEACONS_NOTIFICATION); - IWL_CMD(REPLY_CT_KILL_CONFIG_CMD); - IWL_CMD(SENSITIVITY_CMD); - IWL_CMD(REPLY_PHY_CALIBRATION_CMD); - IWL_CMD(REPLY_RX_PHY_CMD); - IWL_CMD(REPLY_RX_MPDU_CMD); - IWL_CMD(REPLY_RX); - IWL_CMD(REPLY_COMPRESSED_BA); - IWL_CMD(CALIBRATION_CFG_CMD); - IWL_CMD(CALIBRATION_RES_NOTIFICATION); - IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION); - IWL_CMD(REPLY_TX_POWER_DBM_CMD); - IWL_CMD(TEMPERATURE_NOTIFICATION); - IWL_CMD(TX_ANT_CONFIGURATION_CMD); - IWL_CMD(REPLY_BT_COEX_PROFILE_NOTIF); - IWL_CMD(REPLY_BT_COEX_PRIO_TABLE); - IWL_CMD(REPLY_BT_COEX_PROT_ENV); - IWL_CMD(REPLY_WIPAN_PARAMS); - IWL_CMD(REPLY_WIPAN_RXON); - IWL_CMD(REPLY_WIPAN_RXON_TIMING); - IWL_CMD(REPLY_WIPAN_RXON_ASSOC); - IWL_CMD(REPLY_WIPAN_QOS_PARAM); - IWL_CMD(REPLY_WIPAN_WEPKEY); - IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH); - IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION); - IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE); - default: - return "UNKNOWN"; - - } -} - -#define HOST_COMPLETE_TIMEOUT (HZ / 2) - -static void iwl_generic_cmd_callback(struct iwl_priv *priv, - struct iwl_device_cmd *cmd, - struct iwl_rx_packet *pkt) -{ - if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { - IWL_ERR(priv, "Bad return from %s (0x%08X)\n", - get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); - return; - } - -#ifdef CONFIG_IWLWIFI_DEBUG - switch (cmd->hdr.cmd) { - case REPLY_TX_LINK_QUALITY_CMD: - case 
SENSITIVITY_CMD: - IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n", - get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); - break; - default: - IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n", - get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); - } -#endif -} - -static int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd) -{ - int ret; - - if (WARN_ON(!(cmd->flags & CMD_ASYNC))) - return -EINVAL; - - /* An asynchronous command can not expect an SKB to be set. */ - if (WARN_ON(cmd->flags & CMD_WANT_SKB)) - return -EINVAL; - - /* Assign a generic callback if one is not provided */ - if (!cmd->callback) - cmd->callback = iwl_generic_cmd_callback; - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return -EBUSY; - - ret = iwl_enqueue_hcmd(priv, cmd); - if (ret < 0) { - IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n", - get_cmd_string(cmd->id), ret); - return ret; - } - return 0; -} - -int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd) -{ - int cmd_idx; - int ret; - - if (WARN_ON(cmd->flags & CMD_ASYNC)) - return -EINVAL; - - /* A synchronous command can not have a callback set. */ - if (WARN_ON(cmd->callback)) - return -EINVAL; - - IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n", - get_cmd_string(cmd->id)); - - set_bit(STATUS_HCMD_ACTIVE, &priv->status); - IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n", - get_cmd_string(cmd->id)); - - cmd_idx = iwl_enqueue_hcmd(priv, cmd); - if (cmd_idx < 0) { - ret = cmd_idx; - clear_bit(STATUS_HCMD_ACTIVE, &priv->status); - IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n", - get_cmd_string(cmd->id), ret); - return ret; - } - - ret = wait_event_interruptible_timeout(priv->wait_command_queue, - !test_bit(STATUS_HCMD_ACTIVE, &priv->status), - HOST_COMPLETE_TIMEOUT); - if (!ret) { - if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) { - IWL_ERR(priv, - "Error sending %s: time out after %dms.\n", - get_cmd_string(cmd->id), - jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); - - clear_bit(STATUS_HCMD_ACTIVE, &priv->status); - IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n", - get_cmd_string(cmd->id)); - ret = -ETIMEDOUT; - goto cancel; - } - } - - if (test_bit(STATUS_RF_KILL_HW, &priv->status)) { - IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n", - get_cmd_string(cmd->id)); - ret = -ECANCELED; - goto fail; - } - if (test_bit(STATUS_FW_ERROR, &priv->status)) { - IWL_ERR(priv, "Command %s failed: FW Error\n", - get_cmd_string(cmd->id)); - ret = -EIO; - goto fail; - } - if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) { - IWL_ERR(priv, "Error: Response NULL in '%s'\n", - get_cmd_string(cmd->id)); - ret = -EIO; - goto cancel; - } - - return 0; - -cancel: - if (cmd->flags & CMD_WANT_SKB) { - /* - * Cancel the CMD_WANT_SKB flag for the cmd in the - * TX cmd queue. Otherwise in case the cmd comes - * in later, it will possibly set an invalid - * address (cmd->meta.source). 
- */ - priv->txq[priv->cmd_queue].meta[cmd_idx].flags &= - ~CMD_WANT_SKB; - } -fail: - if (cmd->reply_page) { - iwl_free_pages(priv, cmd->reply_page); - cmd->reply_page = 0; - } - - return ret; -} - -int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) -{ - if (cmd->flags & CMD_ASYNC) - return iwl_send_cmd_async(priv, cmd); - - return iwl_send_cmd_sync(priv, cmd); -} - -int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data) -{ - struct iwl_host_cmd cmd = { - .id = id, - .len = { len, }, - .data = { data, }, - }; - - return iwl_send_cmd_sync(priv, &cmd); -} - -int iwl_send_cmd_pdu_async(struct iwl_priv *priv, - u8 id, u16 len, const void *data, - void (*callback)(struct iwl_priv *priv, - struct iwl_device_cmd *cmd, - struct iwl_rx_packet *pkt)) -{ - struct iwl_host_cmd cmd = { - .id = id, - .len = { len, }, - .data = { data, }, - }; - - cmd.flags |= CMD_ASYNC; - cmd.callback = callback; - - return iwl_send_cmd_async(priv, &cmd); -} diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h index 41207a3645b..9d91552d13c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-helpers.h +++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h @@ -120,7 +120,16 @@ static inline void iwl_wake_any_queue(struct iwl_priv *priv, } } +#ifdef ieee80211_stop_queue +#undef ieee80211_stop_queue +#endif + #define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue + +#ifdef ieee80211_wake_queue +#undef ieee80211_wake_queue +#endif + #define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue static inline void iwl_disable_interrupts(struct iwl_priv *priv) diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h index 869edc580ec..19a09310112 100644 --- a/drivers/net/wireless/iwlwifi/iwl-io.h +++ b/drivers/net/wireless/iwlwifi/iwl-io.h @@ -34,22 +34,23 @@ #include "iwl-dev.h" #include "iwl-debug.h" #include "iwl-devtrace.h" +#include "iwl-bus.h" static inline void iwl_write8(struct iwl_priv *priv, u32 ofs, u8 val) { trace_iwlwifi_dev_iowrite8(priv, ofs, val); - iowrite8(val, priv->hw_base + ofs); + bus_write8(priv->bus, ofs, val); } static inline void iwl_write32(struct iwl_priv *priv, u32 ofs, u32 val) { trace_iwlwifi_dev_iowrite32(priv, ofs, val); - iowrite32(val, priv->hw_base + ofs); + bus_write32(priv->bus, ofs, val); } static inline u32 iwl_read32(struct iwl_priv *priv, u32 ofs) { - u32 val = ioread32(priv->hw_base + ofs); + u32 val = bus_read32(priv->bus, ofs); trace_iwlwifi_dev_ioread32(priv, ofs, val); return val; } diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c index 7c23beb49d7..a67ae56d546 100644 --- a/drivers/net/wireless/iwlwifi/iwl-led.c +++ b/drivers/net/wireless/iwlwifi/iwl-led.c @@ -28,8 +28,6 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> -#include <linux/pci.h> -#include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/skbuff.h> #include <linux/netdevice.h> @@ -40,13 +38,9 @@ #include "iwl-dev.h" #include "iwl-core.h" +#include "iwl-agn.h" #include "iwl-io.h" - -/* default: IWL_LED_BLINK(0) using blinking index table */ -static int led_mode; -module_param(led_mode, int, S_IRUGO); -MODULE_PARM_DESC(led_mode, "0=system default, " - "1=On(RF On)/Off(RF Off), 2=blinking"); +#include "iwl-trans.h" /* Throughput OFF time(ms) ON time (ms) * >300 25 25 @@ -118,7 +112,7 @@ static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd) if (reg != (reg & CSR_LED_BSM_CTRL_MSK)) iwl_write32(priv, 
CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK); - return iwl_send_cmd(priv, &cmd); + return trans_send_cmd(&priv->trans, &cmd); } /* Set led pattern command */ @@ -181,7 +175,7 @@ static int iwl_led_blink_set(struct led_classdev *led_cdev, void iwl_leds_init(struct iwl_priv *priv) { - int mode = led_mode; + int mode = iwlagn_mod_params.led_mode; int ret; if (mode == IWL_LED_DEFAULT) @@ -209,7 +203,8 @@ void iwl_leds_init(struct iwl_priv *priv) break; } - ret = led_classdev_register(&priv->pci_dev->dev, &priv->led); + ret = led_classdev_register(priv->bus->dev, + &priv->led); if (ret) { kfree(priv->led.name); return; diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c new file mode 100644 index 00000000000..fb7e436b40c --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-pci.c @@ -0,0 +1,569 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include <linux/pci.h> +#include <linux/pci-aspm.h> + +#include "iwl-bus.h" +#include "iwl-agn.h" +#include "iwl-core.h" +#include "iwl-io.h" + +/* PCI registers */ +#define PCI_CFG_RETRY_TIMEOUT 0x041 +#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01 +#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02 + +struct iwl_pci_bus { + /* basic pci-network driver stuff */ + struct pci_dev *pci_dev; + + /* pci hardware address support */ + void __iomem *hw_base; +}; + +#define IWL_BUS_GET_PCI_BUS(_iwl_bus) \ + ((struct iwl_pci_bus *) ((_iwl_bus)->bus_specific)) + +#define IWL_BUS_GET_PCI_DEV(_iwl_bus) \ + ((IWL_BUS_GET_PCI_BUS(_iwl_bus))->pci_dev) + +static u16 iwl_pciexp_link_ctrl(struct iwl_bus *bus) +{ + int pos; + u16 pci_lnk_ctl; + struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus); + + pos = pci_pcie_cap(pci_dev); + pci_read_config_word(pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl); + return pci_lnk_ctl; +} + +static bool iwl_pci_is_pm_supported(struct iwl_bus *bus) +{ + u16 lctl = iwl_pciexp_link_ctrl(bus); + + return !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN); +} + +static void iwl_pci_apm_config(struct iwl_bus *bus) +{ + /* + * HW bug W/A for instability in PCIe bus L0S->L1 transition. + * Check if BIOS (or OS) enabled L1-ASPM on this device. + * If so (likely), disable L0S, so device moves directly L0->L1; + * costs negligible amount of power savings. + * If not (unlikely), enable L0S, so there is at least some + * power savings, even without L1. + */ + u16 lctl = iwl_pciexp_link_ctrl(bus); + + if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) == + PCI_CFG_LINK_CTRL_VAL_L1_EN) { + /* L1-ASPM enabled; disable(!) L0S */ + iwl_set_bit(bus->drv_data, CSR_GIO_REG, + CSR_GIO_REG_VAL_L0S_ENABLED); + dev_printk(KERN_INFO, bus->dev, "L1 Enabled; Disabling L0S\n"); + } else { + /* L1-ASPM disabled; enable(!) 
L0S */ + iwl_clear_bit(bus->drv_data, CSR_GIO_REG, + CSR_GIO_REG_VAL_L0S_ENABLED); + dev_printk(KERN_INFO, bus->dev, "L1 Disabled; Enabling L0S\n"); + } +} + +static void iwl_pci_set_drv_data(struct iwl_bus *bus, void *drv_data) +{ + bus->drv_data = drv_data; +} + +static void iwl_pci_get_hw_id(struct iwl_bus *bus, char buf[], + int buf_len) +{ + struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus); + + snprintf(buf, buf_len, "PCI ID: 0x%04X:0x%04X", pci_dev->device, + pci_dev->subsystem_device); +} + +static void iwl_pci_write8(struct iwl_bus *bus, u32 ofs, u8 val) +{ + iowrite8(val, IWL_BUS_GET_PCI_BUS(bus)->hw_base + ofs); +} + +static void iwl_pci_write32(struct iwl_bus *bus, u32 ofs, u32 val) +{ + iowrite32(val, IWL_BUS_GET_PCI_BUS(bus)->hw_base + ofs); +} + +static u32 iwl_pci_read32(struct iwl_bus *bus, u32 ofs) +{ + u32 val = ioread32(IWL_BUS_GET_PCI_BUS(bus)->hw_base + ofs); + return val; +} + +static struct iwl_bus_ops pci_ops = { + .get_pm_support = iwl_pci_is_pm_supported, + .apm_config = iwl_pci_apm_config, + .set_drv_data = iwl_pci_set_drv_data, + .get_hw_id = iwl_pci_get_hw_id, + .write8 = iwl_pci_write8, + .write32 = iwl_pci_write32, + .read32 = iwl_pci_read32, +}; + +#define IWL_PCI_DEVICE(dev, subdev, cfg) \ + .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = (subdev), \ + .driver_data = (kernel_ulong_t)&(cfg) + +/* Hardware specific file defines the PCI IDs table for that hardware module */ +static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { + {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1304, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bgn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bgn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1221, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1321, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1224, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1324, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1225, iwl5100_bgn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1325, iwl5100_bgn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1226, iwl5100_abg_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1211, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1311, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1214, iwl5100_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1314, iwl5100_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1215, iwl5100_bgn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1315, iwl5100_bgn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4237, 0x1316, iwl5100_abg_cfg)}, /* Half Mini Card */ + +/* 5300 Series WiFi */ + {IWL_PCI_DEVICE(0x4235, 0x1021, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1121, iwl5300_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1024, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1124, iwl5300_agn_cfg)}, /* Half Mini Card */ + 
{IWL_PCI_DEVICE(0x4235, 0x1001, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1101, iwl5300_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1004, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4235, 0x1104, iwl5300_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4236, 0x1011, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4236, 0x1111, iwl5300_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x4236, 0x1014, iwl5300_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x4236, 0x1114, iwl5300_agn_cfg)}, /* Half Mini Card */ + +/* 5350 Series WiFi/WiMax */ + {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, /* Mini Card */ + +/* 5150 Series Wifi/WiMax */ + {IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_abg_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */ + + {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */ + {IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_abg_cfg)}, /* Mini Card */ + {IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_abg_cfg)}, /* Half Mini Card */ + +/* 6x00 Series */ + {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)}, + {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)}, + {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)}, + {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)}, + {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)}, + {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)}, + +/* 6x05 Series */ + {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)}, + +/* 6x30 Series */ + {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)}, + {IWL_PCI_DEVICE(0x008A, 0x5307, iwl1030_bg_cfg)}, + {IWL_PCI_DEVICE(0x008A, 0x5325, iwl1030_bgn_cfg)}, + {IWL_PCI_DEVICE(0x008A, 0x5327, iwl1030_bg_cfg)}, + {IWL_PCI_DEVICE(0x008B, 0x5315, iwl1030_bgn_cfg)}, + {IWL_PCI_DEVICE(0x008B, 0x5317, iwl1030_bg_cfg)}, + {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6030_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6030_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6030_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6030_2bg_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6030_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6030_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6030_2abg_cfg)}, + +/* 6x50 WiFi/WiMax Series */ + {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)}, + 
{IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_2abg_cfg)}, + {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)}, + {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)}, + +/* 6150 WiFi/WiMax Series */ + {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_bg_cfg)}, + {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_bg_cfg)}, + {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_bg_cfg)}, + +/* 1000 Series WiFi */ + {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_bg_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_bg_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_bg_cfg)}, + {IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_bg_cfg)}, + {IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_bg_cfg)}, + {IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_bg_cfg)}, + +/* 100 Series WiFi */ + {IWL_PCI_DEVICE(0x08AE, 0x1005, iwl100_bgn_cfg)}, + {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)}, + {IWL_PCI_DEVICE(0x08AF, 0x1015, iwl100_bgn_cfg)}, + {IWL_PCI_DEVICE(0x08AF, 0x1017, iwl100_bg_cfg)}, + {IWL_PCI_DEVICE(0x08AE, 0x1025, iwl100_bgn_cfg)}, + {IWL_PCI_DEVICE(0x08AE, 0x1027, iwl100_bg_cfg)}, + +/* 130 Series WiFi */ + {IWL_PCI_DEVICE(0x0896, 0x5005, iwl130_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0896, 0x5007, iwl130_bg_cfg)}, + {IWL_PCI_DEVICE(0x0897, 0x5015, iwl130_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0897, 0x5017, iwl130_bg_cfg)}, + {IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)}, + +/* 2x00 Series */ + {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)}, + {IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)}, + {IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)}, + +/* 2x30 Series */ + {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0887, 0x4066, iwl2030_2bg_cfg)}, + {IWL_PCI_DEVICE(0x0888, 0x4266, iwl2030_2bg_cfg)}, + {IWL_PCI_DEVICE(0x0887, 0x4466, iwl2030_2bg_cfg)}, + +/* 6x35 Series */ + {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)}, + {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)}, + {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)}, + {IWL_PCI_DEVICE(0x088E, 0x4064, iwl6035_2abg_cfg)}, + {IWL_PCI_DEVICE(0x088F, 0x4264, iwl6035_2abg_cfg)}, + {IWL_PCI_DEVICE(0x088E, 0x4464, iwl6035_2abg_cfg)}, + {IWL_PCI_DEVICE(0x088E, 0x4066, iwl6035_2bg_cfg)}, + {IWL_PCI_DEVICE(0x088F, 0x4266, iwl6035_2bg_cfg)}, + {IWL_PCI_DEVICE(0x088E, 0x4466, iwl6035_2bg_cfg)}, + +/* 105 Series */ + {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0894, 0x0026, iwl105_bg_cfg)}, + {IWL_PCI_DEVICE(0x0895, 0x0226, iwl105_bg_cfg)}, + {IWL_PCI_DEVICE(0x0894, 0x0426, iwl105_bg_cfg)}, + +/* 135 Series */ + {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0892, 
0x0462, iwl135_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0892, 0x0066, iwl135_bg_cfg)}, + {IWL_PCI_DEVICE(0x0893, 0x0266, iwl135_bg_cfg)}, + {IWL_PCI_DEVICE(0x0892, 0x0466, iwl135_bg_cfg)}, + + {0} +}; +MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); + +static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); + struct iwl_bus *bus; + struct iwl_pci_bus *pci_bus; + u16 pci_cmd; + int err; + + bus = kzalloc(sizeof(*bus) + sizeof(*pci_bus), GFP_KERNEL); + if (!bus) { + dev_printk(KERN_ERR, &pdev->dev, + "Couldn't allocate iwl_pci_bus"); + err = -ENOMEM; + goto out_no_pci; + } + + pci_bus = IWL_BUS_GET_PCI_BUS(bus); + pci_bus->pci_dev = pdev; + + /* W/A - seems to solve weird behavior. We need to remove this if we + * don't want to stay in L1 all the time. This wastes a lot of power */ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); + + if (pci_enable_device(pdev)) { + err = -ENODEV; + goto out_no_pci; + } + + pci_set_master(pdev); + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); + if (!err) + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); + if (err) { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (!err) + err = pci_set_consistent_dma_mask(pdev, + DMA_BIT_MASK(32)); + /* both attempts failed: */ + if (err) { + dev_printk(KERN_ERR, bus->dev, + "No suitable DMA available.\n"); + goto out_pci_disable_device; + } + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_printk(KERN_ERR, bus->dev, "pci_request_regions failed"); + goto out_pci_disable_device; + } + + pci_bus->hw_base = pci_iomap(pdev, 0, 0); + if (!pci_bus->hw_base) { + dev_printk(KERN_ERR, bus->dev, "pci_iomap failed"); + err = -ENODEV; + goto out_pci_release_regions; + } + + dev_printk(KERN_INFO, &pdev->dev, + "pci_resource_len = 0x%08llx\n", + (unsigned long long) pci_resource_len(pdev, 0)); + dev_printk(KERN_INFO, &pdev->dev, + "pci_resource_base = %p\n", pci_bus->hw_base); + + dev_printk(KERN_INFO, &pdev->dev, + "HW Revision ID = 0x%X\n", pdev->revision); + + /* We disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state */ + pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); + + err = pci_enable_msi(pdev); + if (err) { + dev_printk(KERN_ERR, &pdev->dev, "pci_enable_msi failed"); + goto out_iounmap; + } + + /* TODO: Move this away, not needed if not MSI */ + /* enable rfkill interrupt: hw bug w/a */ + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); + if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { + pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; + pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); + } + + pci_set_drvdata(pdev, bus); + + bus->dev = &pdev->dev; + bus->irq = pdev->irq; + bus->ops = &pci_ops; + + err = iwl_probe(bus, cfg); + if (err) + goto out_disable_msi; + return 0; + +out_disable_msi: + pci_disable_msi(pdev); +out_iounmap: + pci_iounmap(pdev, pci_bus->hw_base); +out_pci_release_regions: + pci_set_drvdata(pdev, NULL); + pci_release_regions(pdev); +out_pci_disable_device: + pci_disable_device(pdev); +out_no_pci: + kfree(bus); + return err; +} + +static void iwl_pci_down(struct iwl_bus *bus) +{ + struct iwl_pci_bus *pci_bus = (struct iwl_pci_bus *) bus->bus_specific; + + pci_disable_msi(pci_bus->pci_dev); + pci_iounmap(pci_bus->pci_dev, pci_bus->hw_base); + pci_release_regions(pci_bus->pci_dev); + pci_disable_device(pci_bus->pci_dev); + pci_set_drvdata(pci_bus->pci_dev, NULL); + + kfree(bus); +} + +static void 
__devexit iwl_pci_remove(struct pci_dev *pdev) +{ + struct iwl_bus *bus = pci_get_drvdata(pdev); + + iwl_remove(bus->drv_data); + + iwl_pci_down(bus); +} + +#ifdef CONFIG_PM + +static int iwl_pci_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct iwl_bus *bus = pci_get_drvdata(pdev); + + /* Before you put code here, think about WoWLAN. You cannot check here + * whether WoWLAN is enabled or not, and your code will run even if + * WoWLAN is enabled - don't kill the NIC, someone may need it in Sx. + */ + + return iwl_suspend(bus->drv_data); +} + +static int iwl_pci_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct iwl_bus *bus = pci_get_drvdata(pdev); + + /* Before you put code here, think about WoWLAN. You cannot check here + * whether WoWLAN is enabled or not, and your code will run even if + * WoWLAN is enabled - the NIC may be alive. + */ + + /* + * We disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state. + */ + pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); + + return iwl_resume(bus->drv_data); +} + +static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume); + +#define IWL_PM_OPS (&iwl_dev_pm_ops) + +#else + +#define IWL_PM_OPS NULL + +#endif + +static struct pci_driver iwl_pci_driver = { + .name = DRV_NAME, + .id_table = iwl_hw_card_ids, + .probe = iwl_pci_probe, + .remove = __devexit_p(iwl_pci_remove), + .driver.pm = IWL_PM_OPS, +}; + +int __must_check iwl_pci_register_driver(void) +{ + int ret; + ret = pci_register_driver(&iwl_pci_driver); + if (ret) + pr_err("Unable to initialize PCI module\n"); + + return ret; +} + +void iwl_pci_unregister_driver(void) +{ + pci_unregister_driver(&iwl_pci_driver); +} diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c index 595c930b28a..3ec619c6881 100644 --- a/drivers/net/wireless/iwlwifi/iwl-power.c +++ b/drivers/net/wireless/iwlwifi/iwl-power.c @@ -36,11 +36,13 @@ #include "iwl-eeprom.h" #include "iwl-dev.h" +#include "iwl-agn.h" #include "iwl-core.h" #include "iwl-io.h" #include "iwl-commands.h" #include "iwl-debug.h" #include "iwl-power.h" +#include "iwl-trans.h" /* * Setting power level allows the card to go to sleep when not busy. @@ -51,16 +53,6 @@ */ /* - * For now, keep using power level 1 instead of automatically - * adjusting ... - */ -bool no_sleep_autoadjust = true; -module_param(no_sleep_autoadjust, bool, S_IRUGO); -MODULE_PARM_DESC(no_sleep_autoadjust, - "don't automatically adjust sleep level " - "according to maximum network latency"); - -/* * This defines the old power levels. 
They are still used by default * (level 1) and for thermal throttle (levels 3 through 5) */ @@ -254,7 +246,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv, } } - if (priv->power_data.pci_pm) + if (priv->power_data.bus_pm) cmd->flags |= IWL_POWER_PCI_PM_MSK; else cmd->flags &= ~IWL_POWER_PCI_PM_MSK; @@ -269,7 +261,7 @@ static void iwl_power_sleep_cam_cmd(struct iwl_priv *priv, { memset(cmd, 0, sizeof(*cmd)); - if (priv->power_data.pci_pm) + if (priv->power_data.bus_pm) cmd->flags |= IWL_POWER_PCI_PM_MSK; IWL_DEBUG_POWER(priv, "Sleep command for CAM\n"); @@ -305,7 +297,7 @@ static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv, cmd->flags = IWL_POWER_DRIVER_ALLOW_SLEEP_MSK | IWL_POWER_FAST_PD; /* no use seeing frames for others */ - if (priv->power_data.pci_pm) + if (priv->power_data.bus_pm) cmd->flags |= IWL_POWER_PCI_PM_MSK; if (priv->cfg->base_params->shadow_reg_enable) @@ -343,7 +335,7 @@ static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd) le32_to_cpu(cmd->sleep_interval[3]), le32_to_cpu(cmd->sleep_interval[4])); - return iwl_send_cmd_pdu(priv, POWER_TABLE_CMD, + return trans_send_cmd_pdu(&priv->trans, POWER_TABLE_CMD, CMD_SYNC, sizeof(struct iwl_powertable_cmd), cmd); } @@ -355,7 +347,9 @@ static void iwl_power_build_cmd(struct iwl_priv *priv, dtimper = priv->hw->conf.ps_dtim_period ?: 1; - if (priv->hw->conf.flags & IEEE80211_CONF_IDLE) + if (priv->wowlan) + iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper); + else if (priv->hw->conf.flags & IEEE80211_CONF_IDLE) iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20); else if (iwl_tt_is_low_power_state(priv)) { /* in thermal throttling low power state */ @@ -367,9 +361,15 @@ static void iwl_power_build_cmd(struct iwl_priv *priv, iwl_static_sleep_cmd(priv, cmd, priv->power_data.debug_sleep_level_override, dtimper); - else if (no_sleep_autoadjust) - iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_1, dtimper); - else + else if (iwlagn_mod_params.no_sleep_autoadjust) { + if (iwlagn_mod_params.power_level > IWL_POWER_INDEX_1 && + iwlagn_mod_params.power_level <= IWL_POWER_INDEX_5) + iwl_static_sleep_cmd(priv, cmd, + iwlagn_mod_params.power_level, dtimper); + else + iwl_static_sleep_cmd(priv, cmd, + IWL_POWER_INDEX_1, dtimper); + } else iwl_power_fill_sleep_cmd(priv, cmd, priv->hw->conf.dynamic_ps_timeout, priv->hw->conf.max_sleep_period); @@ -408,9 +408,9 @@ int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd, if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)) clear_bit(STATUS_POWER_PMI, &priv->status); - if (priv->cfg->ops->lib->update_chain_flags && update_chains) - priv->cfg->ops->lib->update_chain_flags(priv); - else if (priv->cfg->ops->lib->update_chain_flags) + if (update_chains) + iwl_update_chain_flags(priv); + else IWL_DEBUG_POWER(priv, "Cannot update the power, chain noise " "calibration running: %d\n", @@ -434,9 +434,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force) /* initialize to default */ void iwl_power_initialize(struct iwl_priv *priv) { - u16 lctl = iwl_pcie_link_ctl(priv); - - priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN); + priv->power_data.bus_pm = bus_get_pm_support(priv->bus); priv->power_data.debug_sleep_level_override = -1; diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h index 59635d784e2..5f7b720cf1a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-power.h +++ b/drivers/net/wireless/iwlwifi/iwl-power.h @@ -43,7 +43,7 @@ struct iwl_power_mgr { struct 
iwl_powertable_cmd sleep_cmd; struct iwl_powertable_cmd sleep_cmd_next; int debug_sleep_level_override; - bool pci_pm; + bool bus_pm; }; int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd, diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h index f00d188b2cf..2f267b8aabb 100644 --- a/drivers/net/wireless/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/iwlwifi/iwl-prph.h @@ -168,6 +168,7 @@ * the scheduler (especially for queue #4/#9, the command queue, otherwise * the driver can't issue commands!): */ +#define SCD_MEM_LOWER_BOUND (0x0000) /** * Max Tx window size is the max number of contiguous TFDs that the scheduler @@ -177,53 +178,61 @@ #define SCD_WIN_SIZE 64 #define SCD_FRAME_LIMIT 64 -#define IWL_SCD_TXFIFO_POS_TID (0) -#define IWL_SCD_TXFIFO_POS_RA (4) -#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF) +#define SCD_TXFIFO_POS_TID (0) +#define SCD_TXFIFO_POS_RA (4) +#define SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF) /* agn SCD */ -#define IWLAGN_SCD_QUEUE_STTS_REG_POS_TXF (0) -#define IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE (3) -#define IWLAGN_SCD_QUEUE_STTS_REG_POS_WSL (4) -#define IWLAGN_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19) -#define IWLAGN_SCD_QUEUE_STTS_REG_MSK (0x00FF0000) - -#define IWLAGN_SCD_QUEUE_CTX_REG1_CREDIT_POS (8) -#define IWLAGN_SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00) -#define IWLAGN_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_POS (24) -#define IWLAGN_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_MSK (0xFF000000) -#define IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS (0) -#define IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK (0x0000007F) -#define IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16) -#define IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000) - -#define IWLAGN_SCD_CONTEXT_DATA_OFFSET (0x600) -#define IWLAGN_SCD_TX_STTS_BITMAP_OFFSET (0x7B1) -#define IWLAGN_SCD_TRANSLATE_TBL_OFFSET (0x7E0) - -#define IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(x)\ - (IWLAGN_SCD_CONTEXT_DATA_OFFSET + ((x) * 8)) - -#define IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \ - ((IWLAGN_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffc) - -#define IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv) \ +#define SCD_QUEUE_STTS_REG_POS_TXF (0) +#define SCD_QUEUE_STTS_REG_POS_ACTIVE (3) +#define SCD_QUEUE_STTS_REG_POS_WSL (4) +#define SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19) +#define SCD_QUEUE_STTS_REG_MSK (0x00FF0000) + +#define SCD_QUEUE_CTX_REG1_CREDIT_POS (8) +#define SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00) +#define SCD_QUEUE_CTX_REG1_SUPER_CREDIT_POS (24) +#define SCD_QUEUE_CTX_REG1_SUPER_CREDIT_MSK (0xFF000000) +#define SCD_QUEUE_CTX_REG2_WIN_SIZE_POS (0) +#define SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK (0x0000007F) +#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16) +#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000) + +/* Context Data */ +#define SCD_CONTEXT_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x600) +#define SCD_CONTEXT_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x6A0) + +/* Tx status */ +#define SCD_TX_STTS_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x6A0) +#define SCD_TX_STTS_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x7E0) + +/* Translation Data */ +#define SCD_TRANS_TBL_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x7E0) +#define SCD_TRANS_TBL_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x808) + +#define SCD_CONTEXT_QUEUE_OFFSET(x)\ + (SCD_CONTEXT_MEM_LOWER_BOUND + ((x) * 8)) + +#define SCD_TRANS_TBL_OFFSET_QUEUE(x) \ + ((SCD_TRANS_TBL_MEM_LOWER_BOUND + ((x) * 2)) & 0xfffc) + +#define SCD_QUEUECHAIN_SEL_ALL(priv) \ (((1<<(priv)->hw_params.max_txq_num) - 1) &\ (~(1<<(priv)->cmd_queue))) -#define 
IWLAGN_SCD_BASE (PRPH_BASE + 0xa02c00) - -#define IWLAGN_SCD_SRAM_BASE_ADDR (IWLAGN_SCD_BASE + 0x0) -#define IWLAGN_SCD_DRAM_BASE_ADDR (IWLAGN_SCD_BASE + 0x8) -#define IWLAGN_SCD_AIT (IWLAGN_SCD_BASE + 0x0c) -#define IWLAGN_SCD_TXFACT (IWLAGN_SCD_BASE + 0x10) -#define IWLAGN_SCD_ACTIVE (IWLAGN_SCD_BASE + 0x14) -#define IWLAGN_SCD_QUEUE_WRPTR(x) (IWLAGN_SCD_BASE + 0x18 + (x) * 4) -#define IWLAGN_SCD_QUEUE_RDPTR(x) (IWLAGN_SCD_BASE + 0x68 + (x) * 4) -#define IWLAGN_SCD_QUEUECHAIN_SEL (IWLAGN_SCD_BASE + 0xe8) -#define IWLAGN_SCD_AGGR_SEL (IWLAGN_SCD_BASE + 0x248) -#define IWLAGN_SCD_INTERRUPT_MASK (IWLAGN_SCD_BASE + 0x108) -#define IWLAGN_SCD_QUEUE_STATUS_BITS(x) (IWLAGN_SCD_BASE + 0x10c + (x) * 4) +#define SCD_BASE (PRPH_BASE + 0xa02c00) + +#define SCD_SRAM_BASE_ADDR (SCD_BASE + 0x0) +#define SCD_DRAM_BASE_ADDR (SCD_BASE + 0x8) +#define SCD_AIT (SCD_BASE + 0x0c) +#define SCD_TXFACT (SCD_BASE + 0x10) +#define SCD_ACTIVE (SCD_BASE + 0x14) +#define SCD_QUEUE_WRPTR(x) (SCD_BASE + 0x18 + (x) * 4) +#define SCD_QUEUE_RDPTR(x) (SCD_BASE + 0x68 + (x) * 4) +#define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8) +#define SCD_AGGR_SEL (SCD_BASE + 0x248) +#define SCD_INTERRUPT_MASK (SCD_BASE + 0x108) +#define SCD_QUEUE_STATUS_BITS(x) (SCD_BASE + 0x10c + (x) * 4) /*********************** END TX SCHEDULER *************************************/ diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c index b774517aa9f..8e314003b63 100644 --- a/drivers/net/wireless/iwlwifi/iwl-rx.c +++ b/drivers/net/wireless/iwlwifi/iwl-rx.c @@ -41,183 +41,6 @@ #include "iwl-agn-calib.h" #include "iwl-agn.h" -/****************************************************************************** - * - * RX path functions - * - ******************************************************************************/ - -/* - * Rx theory of operation - * - * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), - * each of which point to Receive Buffers to be filled by the NIC. These get - * used not only for Rx frames, but for any command response or notification - * from the NIC. The driver and NIC manage the Rx buffers by means - * of indexes into the circular buffer. - * - * Rx Queue Indexes - * The host/firmware share two index registers for managing the Rx buffers. - * - * The READ index maps to the first position that the firmware may be writing - * to -- the driver can read up to (but not including) this position and get - * good data. - * The READ index is managed by the firmware once the card is enabled. - * - * The WRITE index maps to the last position the driver has read from -- the - * position preceding WRITE is the last slot the firmware can place a packet. - * - * The queue is empty (no good data) if WRITE = READ - 1, and is full if - * WRITE = READ. - * - * During initialization, the host sets up the READ queue position to the first - * INDEX position, and WRITE to the last (READ - 1 wrapped) - * - * When the firmware places a packet in a buffer, it will advance the READ index - * and fire the RX interrupt. The driver can then query the READ index and - * process as many packets as possible, moving the WRITE index forward as it - * resets the Rx queue buffers with new memory. - * - * The management in the driver is as follows: - * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When - * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled - * to replenish the iwl->rxq->rx_free. 
- * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the - * iwl->rxq is replenished and the READ INDEX is updated (updating the - * 'processed' and 'read' driver indexes as well) - * + A received packet is processed and handed to the kernel network stack, - * detached from the iwl->rxq. The driver 'processed' index is updated. - * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free - * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ - * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there - * were enough free buffers and RX_STALLED is set it is cleared. - * - * - * Driver sequence: - * - * iwl_rx_queue_alloc() Allocates rx_free - * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls - * iwl_rx_queue_restock - * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx - * queue, updates firmware pointers, and updates - * the WRITE index. If insufficient rx_free buffers - * are available, schedules iwl_rx_replenish - * - * -- enable interrupts -- - * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the - * READ INDEX, detaching the SKB from the pool. - * Moves the packet buffer from queue to rx_used. - * Calls iwl_rx_queue_restock to refill any empty - * slots. - * ... - * - */ - -/** - * iwl_rx_queue_space - Return number of free slots available in queue. - */ -int iwl_rx_queue_space(const struct iwl_rx_queue *q) -{ - int s = q->read - q->write; - if (s <= 0) - s += RX_QUEUE_SIZE; - /* keep some buffer to not confuse full and empty queue */ - s -= 2; - if (s < 0) - s = 0; - return s; -} - -/** - * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue - */ -void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q) -{ - unsigned long flags; - u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg; - u32 reg; - - spin_lock_irqsave(&q->lock, flags); - - if (q->need_update == 0) - goto exit_unlock; - - if (priv->cfg->base_params->shadow_reg_enable) { - /* shadow register enabled */ - /* Device expects a multiple of 8 */ - q->write_actual = (q->write & ~0x7); - iwl_write32(priv, rx_wrt_ptr_reg, q->write_actual); - } else { - /* If power-saving is in use, make sure device is awake */ - if (test_bit(STATUS_POWER_PMI, &priv->status)) { - reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); - - if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { - IWL_DEBUG_INFO(priv, - "Rx queue requesting wakeup," - " GP1 = 0x%x\n", reg); - iwl_set_bit(priv, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - goto exit_unlock; - } - - q->write_actual = (q->write & ~0x7); - iwl_write_direct32(priv, rx_wrt_ptr_reg, - q->write_actual); - - /* Else device is assumed to be awake */ - } else { - /* Device expects a multiple of 8 */ - q->write_actual = (q->write & ~0x7); - iwl_write_direct32(priv, rx_wrt_ptr_reg, - q->write_actual); - } - } - q->need_update = 0; - - exit_unlock: - spin_unlock_irqrestore(&q->lock, flags); -} - -int iwl_rx_queue_alloc(struct iwl_priv *priv) -{ - struct iwl_rx_queue *rxq = &priv->rxq; - struct device *dev = &priv->pci_dev->dev; - int i; - - spin_lock_init(&rxq->lock); - INIT_LIST_HEAD(&rxq->rx_free); - INIT_LIST_HEAD(&rxq->rx_used); - - /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */ - rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma, - GFP_KERNEL); - if (!rxq->bd) - goto err_bd; - - rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status), - &rxq->rb_stts_dma, GFP_KERNEL); - if (!rxq->rb_stts) - goto 
err_rb; - - /* Fill the rx_used queue with _all_ of the Rx buffers */ - for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) - list_add_tail(&rxq->pool[i].list, &rxq->rx_used); - - /* Set us so that we have processed and used all buffers, but have - * not restocked the Rx queue with fresh buffers */ - rxq->read = rxq->write = 0; - rxq->write_actual = 0; - rxq->free_count = 0; - rxq->need_update = 0; - return 0; - -err_rb: - dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, - rxq->bd_dma); -err_bd: - return -ENOMEM; -} /****************************************************************************** * @@ -347,7 +170,7 @@ static bool iwl_good_ack_health(struct iwl_priv *priv, int actual_delta, expected_delta, ba_timeout_delta; struct statistics_tx *old; - if (priv->_agn.agg_tids_count) + if (priv->agg_tids_count) return true; old = &priv->statistics.tx; @@ -665,8 +488,8 @@ static void iwl_rx_statistics(struct iwl_priv *priv, iwl_rx_calc_noise(priv); queue_work(priv->workqueue, &priv->run_time_calib_work); } - if (priv->cfg->ops->lib->temp_ops.temperature && change) - priv->cfg->ops->lib->temp_ops.temperature(priv); + if (priv->cfg->lib->temperature && change) + priv->cfg->lib->temperature(priv); } static void iwl_rx_reply_statistics(struct iwl_priv *priv, @@ -769,8 +592,8 @@ static void iwl_rx_reply_rx_phy(struct iwl_priv *priv, { struct iwl_rx_packet *pkt = rxb_addr(rxb); - priv->_agn.last_phy_res_valid = true; - memcpy(&priv->_agn.last_phy_res, pkt->u.raw, + priv->last_phy_res_valid = true; + memcpy(&priv->last_phy_res, pkt->u.raw, sizeof(struct iwl_rx_phy_res)); } @@ -943,6 +766,47 @@ static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) return decrypt_out; } +/* Calc max signal level (dBm) among 3 possible receivers */ +static int iwlagn_calc_rssi(struct iwl_priv *priv, + struct iwl_rx_phy_res *rx_resp) +{ + /* data from PHY/DSP regarding signal strength, etc., + * contents are always there, not configurable by host + */ + struct iwlagn_non_cfg_phy *ncphy = + (struct iwlagn_non_cfg_phy *)rx_resp->non_cfg_phy_buf; + u32 val, rssi_a, rssi_b, rssi_c, max_rssi; + u8 agc; + + val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_AGC_IDX]); + agc = (val & IWLAGN_OFDM_AGC_MSK) >> IWLAGN_OFDM_AGC_BIT_POS; + + /* Find max rssi among 3 possible receivers. + * These values are measured by the digital signal processor (DSP). + * They should stay fairly constant even as the signal strength varies, + * if the radio's automatic gain control (AGC) is working right. + * AGC value (see below) will provide the "interesting" info. + */ + val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_AB_IDX]); + rssi_a = (val & IWLAGN_OFDM_RSSI_INBAND_A_BITMSK) >> + IWLAGN_OFDM_RSSI_A_BIT_POS; + rssi_b = (val & IWLAGN_OFDM_RSSI_INBAND_B_BITMSK) >> + IWLAGN_OFDM_RSSI_B_BIT_POS; + val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_C_IDX]); + rssi_c = (val & IWLAGN_OFDM_RSSI_INBAND_C_BITMSK) >> + IWLAGN_OFDM_RSSI_C_BIT_POS; + + max_rssi = max_t(u32, rssi_a, rssi_b); + max_rssi = max_t(u32, max_rssi, rssi_c); + + IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n", + rssi_a, rssi_b, rssi_c, max_rssi, agc); + + /* dBm = max_rssi dB - agc dB - constant. + * Higher AGC (higher radio gain) means lower signal. */ + return max_rssi - agc - IWLAGN_RSSI_OFFSET; +} + /* Called for REPLY_RX (legacy ABG frames), or * REPLY_RX_MPDU_CMD (HT high-throughput N frames). 
*/ static void iwl_rx_reply_rx(struct iwl_priv *priv, @@ -977,11 +841,11 @@ static void iwl_rx_reply_rx(struct iwl_priv *priv, phy_res->cfg_phy_cnt + len); ampdu_status = le32_to_cpu(rx_pkt_status); } else { - if (!priv->_agn.last_phy_res_valid) { + if (!priv->last_phy_res_valid) { IWL_ERR(priv, "MPDU frame without cached PHY data\n"); return; } - phy_res = &priv->_agn.last_phy_res; + phy_res = &priv->last_phy_res; amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw; header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu)); len = le16_to_cpu(amsdu->byte_count); @@ -1024,7 +888,7 @@ static void iwl_rx_reply_rx(struct iwl_priv *priv, priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp); /* Find max signal strength (dBm) among 3 antenna/receiver chains */ - rx_status.signal = priv->cfg->ops->utils->calc_rssi(priv, phy_res); + rx_status.signal = iwlagn_calc_rssi(priv, phy_res); iwl_dbg_log_rx_data_frame(priv, len, header); IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n", @@ -1102,6 +966,64 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv) /* block ack */ handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba; - /* Set up hardware specific Rx handlers */ - priv->cfg->ops->lib->rx_handler_setup(priv); + /* init calibration handlers */ + priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] = + iwlagn_rx_calib_result; + priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx; + + /* set up notification wait support */ + spin_lock_init(&priv->notif_wait_lock); + INIT_LIST_HEAD(&priv->notif_waits); + init_waitqueue_head(&priv->notif_waitq); + + /* Set up BT Rx handlers */ + if (priv->cfg->lib->bt_rx_handler_setup) + priv->cfg->lib->bt_rx_handler_setup(priv); + +} + +void iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + + /* + * Do the notification wait before RX handlers so + * even if the RX handler consumes the RXB we have + * access to it in the notification wait entry. + */ + if (!list_empty(&priv->notif_waits)) { + struct iwl_notification_wait *w; + + spin_lock(&priv->notif_wait_lock); + list_for_each_entry(w, &priv->notif_waits, list) { + if (w->cmd != pkt->hdr.cmd) + continue; + IWL_DEBUG_RX(priv, + "Notif: %s, 0x%02x - wake the callers up\n", + get_cmd_string(pkt->hdr.cmd), + pkt->hdr.cmd); + w->triggered = true; + if (w->fn) + w->fn(priv, pkt, w->fn_data); + } + spin_unlock(&priv->notif_wait_lock); + + wake_up_all(&priv->notif_waitq); + } + + if (priv->pre_rx_handler) + priv->pre_rx_handler(priv, rxb); + + /* Based on type of command response or notification, + * handle those that need handling via function in + * rx_handlers table. See iwl_setup_rx_handlers() */ + if (priv->rx_handlers[pkt->hdr.cmd]) { + priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; + priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); + } else { + /* No handling needed */ + IWL_DEBUG_RX(priv, + "No handler needed for %s, 0x%02x\n", + get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); + } } diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c index d60d630cb93..dd6937e9705 100644 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c @@ -36,6 +36,8 @@ #include "iwl-sta.h" #include "iwl-io.h" #include "iwl-helpers.h" +#include "iwl-agn.h" +#include "iwl-trans.h" /* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after * sending probe req. 
This should be set long enough to hear probe responses @@ -60,7 +62,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv) struct iwl_rx_packet *pkt; struct iwl_host_cmd cmd = { .id = REPLY_SCAN_ABORT_CMD, - .flags = CMD_WANT_SKB, + .flags = CMD_SYNC | CMD_WANT_SKB, }; /* Exit instantly with error when device is not ready @@ -73,7 +75,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv) test_bit(STATUS_EXIT_PENDING, &priv->status)) return -EIO; - ret = iwl_send_cmd_sync(priv, &cmd); + ret = trans_send_cmd(&priv->trans, &cmd); if (ret) return ret; @@ -348,9 +350,6 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv, lockdep_assert_held(&priv->mutex); - if (WARN_ON(!priv->cfg->ops->utils->request_scan)) - return -EOPNOTSUPP; - cancel_delayed_work(&priv->scan_check); if (!iwl_is_ready_rf(priv)) { @@ -379,7 +378,7 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv, priv->scan_start = jiffies; priv->scan_band = band; - ret = priv->cfg->ops->utils->request_scan(priv, vif); + ret = iwlagn_request_scan(priv, vif); if (ret) { clear_bit(STATUS_SCANNING, &priv->status); priv->scan_type = IWL_SCAN_NORMAL; @@ -566,10 +565,10 @@ static void iwl_bg_scan_completed(struct work_struct *work) goto out_settings; } - if (priv->scan_type == IWL_SCAN_OFFCH_TX && priv->_agn.offchan_tx_skb) { + if (priv->scan_type == IWL_SCAN_OFFCH_TX && priv->offchan_tx_skb) { ieee80211_tx_status_irqsafe(priv->hw, - priv->_agn.offchan_tx_skb); - priv->_agn.offchan_tx_skb = NULL; + priv->offchan_tx_skb); + priv->offchan_tx_skb = NULL; } if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) { @@ -600,14 +599,7 @@ out_settings: if (!iwl_is_ready_rf(priv)) goto out; - /* - * We do not commit power settings while scan is pending, - * do it now if the settings changed. - */ - iwl_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false); - iwl_set_tx_power(priv, priv->tx_power_next, false); - - priv->cfg->ops->utils->post_scan(priv); + iwlagn_post_scan(priv); out: mutex_unlock(&priv->mutex); diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c index 7df2814fd4f..1ef3b7106ad 100644 --- a/drivers/net/wireless/iwlwifi/iwl-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-sta.c @@ -35,6 +35,8 @@ #include "iwl-dev.h" #include "iwl-core.h" #include "iwl-sta.h" +#include "iwl-trans.h" +#include "iwl-agn.h" /* priv->sta_lock must be held */ static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id) @@ -132,6 +134,16 @@ static void iwl_add_sta_callback(struct iwl_priv *priv, } +static u16 iwlagn_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data) +{ + u16 size = (u16)sizeof(struct iwl_addsta_cmd); + struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data; + memcpy(addsta, cmd, size); + /* resrved in 5000 */ + addsta->rate_n_flags = cpu_to_le16(0); + return size; +} + int iwl_send_add_sta(struct iwl_priv *priv, struct iwl_addsta_cmd *sta, u8 flags) { @@ -155,8 +167,8 @@ int iwl_send_add_sta(struct iwl_priv *priv, might_sleep(); } - cmd.len[0] = priv->cfg->ops->utils->build_addsta_hcmd(sta, data); - ret = iwl_send_cmd(priv, &cmd); + cmd.len[0] = iwlagn_build_addsta_hcmd(sta, data); + ret = trans_send_cmd(&priv->trans, &cmd); if (ret || (flags & CMD_ASYNC)) return ret; @@ -412,7 +424,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv, cmd.flags |= CMD_WANT_SKB; - ret = iwl_send_cmd(priv, &cmd); + ret = trans_send_cmd(&priv->trans, &cmd); if (ret) return ret; @@ -657,7 +669,7 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct 
iwl_rxon_context *ctx) iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true); } -int iwl_get_free_ucode_key_index(struct iwl_priv *priv) +int iwl_get_free_ucode_key_offset(struct iwl_priv *priv) { int i; @@ -781,7 +793,7 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx, return -EINVAL; if (is_lq_table_valid(priv, ctx, lq)) - ret = iwl_send_cmd(priv, &cmd); + ret = trans_send_cmd(&priv->trans, &cmd); else ret = -EINVAL; diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h index ff64027ff4c..9a6768d6685 100644 --- a/drivers/net/wireless/iwlwifi/iwl-sta.h +++ b/drivers/net/wireless/iwlwifi/iwl-sta.h @@ -31,9 +31,6 @@ #include "iwl-dev.h" -#define HW_KEY_DYNAMIC 0 -#define HW_KEY_DEFAULT 1 - #define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */ #define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */ #define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of @@ -47,7 +44,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx); void iwl_clear_ucode_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx); void iwl_dealloc_bcast_stations(struct iwl_priv *priv); -int iwl_get_free_ucode_key_index(struct iwl_priv *priv); +int iwl_get_free_ucode_key_offset(struct iwl_priv *priv); int iwl_send_add_sta(struct iwl_priv *priv, struct iwl_addsta_cmd *sta, u8 flags); int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx, diff --git a/drivers/net/wireless/iwlwifi/iwl-sv-open.c b/drivers/net/wireless/iwlwifi/iwl-sv-open.c index 69b7e6bf2d6..b11f60de4f1 100644 --- a/drivers/net/wireless/iwlwifi/iwl-sv-open.c +++ b/drivers/net/wireless/iwlwifi/iwl-sv-open.c @@ -69,7 +69,6 @@ #include <net/mac80211.h> #include <net/netlink.h> - #include "iwl-dev.h" #include "iwl-core.h" #include "iwl-debug.h" @@ -77,7 +76,7 @@ #include "iwl-io.h" #include "iwl-agn.h" #include "iwl-testmode.h" - +#include "iwl-trans.h" /* The TLVs used in the gnl message policy between the kernel module and * user space application. 
iwl_testmode_gnl_msg_policy is to be carried @@ -101,9 +100,12 @@ struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = { [IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, }, [IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, }, - [IWL_TM_ATTR_TRACE_DATA] = { .type = NLA_UNSPEC, }, + [IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, }, + [IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, }, [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, }, + + [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, }, }; /* @@ -179,19 +181,19 @@ void iwl_testmode_init(struct iwl_priv *priv) static void iwl_trace_cleanup(struct iwl_priv *priv) { - struct device *dev = &priv->pci_dev->dev; - if (priv->testmode_trace.trace_enabled) { if (priv->testmode_trace.cpu_addr && priv->testmode_trace.dma_addr) - dma_free_coherent(dev, - TRACE_TOTAL_SIZE, + dma_free_coherent(priv->bus->dev, + priv->testmode_trace.total_size, priv->testmode_trace.cpu_addr, priv->testmode_trace.dma_addr); priv->testmode_trace.trace_enabled = false; priv->testmode_trace.cpu_addr = NULL; priv->testmode_trace.trace_addr = NULL; priv->testmode_trace.dma_addr = 0; + priv->testmode_trace.buff_size = 0; + priv->testmode_trace.total_size = 0; } } @@ -229,6 +231,7 @@ static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb) return -ENOMSG; } + cmd.flags = CMD_ON_DEMAND; cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]); cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]); cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]); @@ -236,7 +239,7 @@ static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb) IWL_INFO(priv, "testmode ucode command ID 0x%x, flags 0x%x," " len %d\n", cmd.id, cmd.flags, cmd.len[0]); /* ok, let's submit the command to ucode */ - return iwl_send_cmd(priv, &cmd); + return trans_send_cmd(&priv->trans, &cmd); } @@ -394,7 +397,7 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW: status = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_init, - UCODE_SUBTYPE_INIT, -1); + IWL_UCODE_INIT); if (status) IWL_DEBUG_INFO(priv, "Error loading init ucode: %d\n", status); @@ -402,14 +405,13 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB: iwl_testmode_cfg_init_calib(priv); - iwlagn_stop_device(priv); + trans_stop_device(&priv->trans); break; case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW: status = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_rt, - UCODE_SUBTYPE_REGULAR, - UCODE_SUBTYPE_REGULAR_NEW); + IWL_UCODE_REGULAR); if (status) { IWL_DEBUG_INFO(priv, "Error loading runtime ucode: %d\n", status); @@ -450,7 +452,7 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) "Error finding fixrate setting\n"); return -ENOMSG; } - priv->dbg_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]); + priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]); break; default: @@ -482,16 +484,29 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb) struct iwl_priv *priv = hw->priv; struct sk_buff *skb; int status = 0; - struct device *dev = &priv->pci_dev->dev; + struct device *dev = priv->bus->dev; switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { case IWL_TM_CMD_APP2DEV_BEGIN_TRACE: if (priv->testmode_trace.trace_enabled) return -EBUSY; + if (!tb[IWL_TM_ATTR_TRACE_SIZE]) + priv->testmode_trace.buff_size = TRACE_BUFF_SIZE_DEF; + else + priv->testmode_trace.buff_size = + nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]); + if (!priv->testmode_trace.buff_size) + return 
-EINVAL; + if (priv->testmode_trace.buff_size < TRACE_BUFF_SIZE_MIN || + priv->testmode_trace.buff_size > TRACE_BUFF_SIZE_MAX) + return -EINVAL; + + priv->testmode_trace.total_size = + priv->testmode_trace.buff_size + TRACE_BUFF_PADD; priv->testmode_trace.cpu_addr = dma_alloc_coherent(dev, - TRACE_TOTAL_SIZE, + priv->testmode_trace.total_size, &priv->testmode_trace.dma_addr, GFP_KERNEL); if (!priv->testmode_trace.cpu_addr) @@ -500,7 +515,7 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb) priv->testmode_trace.trace_addr = (u8 *)PTR_ALIGN( priv->testmode_trace.cpu_addr, 0x100); memset(priv->testmode_trace.trace_addr, 0x03B, - TRACE_BUFF_SIZE); + priv->testmode_trace.buff_size); skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, sizeof(priv->testmode_trace.dma_addr) + 20); if (!skb) { @@ -518,34 +533,14 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb) "Error sending msg : %d\n", status); } + priv->testmode_trace.num_chunks = + DIV_ROUND_UP(priv->testmode_trace.buff_size, + TRACE_CHUNK_SIZE); break; case IWL_TM_CMD_APP2DEV_END_TRACE: iwl_trace_cleanup(priv); break; - - case IWL_TM_CMD_APP2DEV_READ_TRACE: - if (priv->testmode_trace.trace_enabled && - priv->testmode_trace.trace_addr) { - skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, - 20 + TRACE_BUFF_SIZE); - if (skb == NULL) { - IWL_DEBUG_INFO(priv, - "Error allocating memory\n"); - return -ENOMEM; - } - NLA_PUT(skb, IWL_TM_ATTR_TRACE_DATA, - TRACE_BUFF_SIZE, - priv->testmode_trace.trace_addr); - status = cfg80211_testmode_reply(skb); - if (status < 0) { - IWL_DEBUG_INFO(priv, - "Error sending msg : %d\n", status); - } - } else - return -EFAULT; - break; - default: IWL_DEBUG_INFO(priv, "Unknown testmode mem command ID\n"); return -ENOSYS; @@ -560,6 +555,73 @@ nla_put_failure: return -EMSGSIZE; } +static int iwl_testmode_trace_dump(struct ieee80211_hw *hw, struct nlattr **tb, + struct sk_buff *skb, + struct netlink_callback *cb) +{ + struct iwl_priv *priv = hw->priv; + int idx, length; + + if (priv->testmode_trace.trace_enabled && + priv->testmode_trace.trace_addr) { + idx = cb->args[4]; + if (idx >= priv->testmode_trace.num_chunks) + return -ENOENT; + length = TRACE_CHUNK_SIZE; + if (((idx + 1) == priv->testmode_trace.num_chunks) && + (priv->testmode_trace.buff_size % TRACE_CHUNK_SIZE)) + length = priv->testmode_trace.buff_size % + TRACE_CHUNK_SIZE; + + NLA_PUT(skb, IWL_TM_ATTR_TRACE_DUMP, length, + priv->testmode_trace.trace_addr + + (TRACE_CHUNK_SIZE * idx)); + idx++; + cb->args[4] = idx; + return 0; + } else + return -EFAULT; + + nla_put_failure: + return -ENOBUFS; +} + +/* + * This function handles the user application switch ucode ownership. + * + * It retrieves the mandatory fields IWL_TM_ATTR_UCODE_OWNER and + * decide who the current owner of the uCode + * + * If the current owner is OWNERSHIP_TM, then the only host command + * can deliver to uCode is from testmode, all the other host commands + * will dropped. 
+ * + * default driver is the owner of uCode in normal operational mode + * + * @hw: ieee80211_hw object that represents the device + * @tb: gnl message fields from the user space + */ +static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb) +{ + struct iwl_priv *priv = hw->priv; + u8 owner; + + if (!tb[IWL_TM_ATTR_UCODE_OWNER]) { + IWL_DEBUG_INFO(priv, "Error finding ucode owner\n"); + return -ENOMSG; + } + + owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]); + if ((owner == IWL_OWNERSHIP_DRIVER) || (owner == IWL_OWNERSHIP_TM)) + priv->ucode_owner = owner; + else { + IWL_DEBUG_INFO(priv, "Invalid owner\n"); + return -EINVAL; + } + return 0; +} + + /* The testmode gnl message handler that takes the gnl message from the * user space and parses it per the policy iwl_testmode_gnl_msg_policy, then * invoke the corresponding handlers. @@ -581,7 +643,7 @@ nla_put_failure: */ int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len) { - struct nlattr *tb[IWL_TM_ATTR_MAX - 1]; + struct nlattr *tb[IWL_TM_ATTR_MAX]; struct iwl_priv *priv = hw->priv; int result; @@ -629,6 +691,11 @@ int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len) result = iwl_testmode_trace(hw, tb); break; + case IWL_TM_CMD_APP2DEV_OWNERSHIP: + IWL_DEBUG_INFO(priv, "testmode change uCode ownership\n"); + result = iwl_testmode_ownership(hw, tb); + break; + default: IWL_DEBUG_INFO(priv, "Unknown testmode command\n"); result = -ENOSYS; @@ -638,3 +705,50 @@ int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len) mutex_unlock(&priv->mutex); return result; } + +int iwl_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb, + struct netlink_callback *cb, + void *data, int len) +{ + struct nlattr *tb[IWL_TM_ATTR_MAX]; + struct iwl_priv *priv = hw->priv; + int result; + u32 cmd; + + if (cb->args[3]) { + /* offset by 1 since commands start at 0 */ + cmd = cb->args[3] - 1; + } else { + result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len, + iwl_testmode_gnl_msg_policy); + if (result) { + IWL_DEBUG_INFO(priv, + "Error parsing the gnl message : %d\n", result); + return result; + } + + /* IWL_TM_ATTR_COMMAND is absolutely mandatory */ + if (!tb[IWL_TM_ATTR_COMMAND]) { + IWL_DEBUG_INFO(priv, + "Error finding testmode command type\n"); + return -ENOMSG; + } + cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]); + cb->args[3] = cmd + 1; + } + + /* in case multiple accesses to the device happens */ + mutex_lock(&priv->mutex); + switch (cmd) { + case IWL_TM_CMD_APP2DEV_READ_TRACE: + IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n"); + result = iwl_testmode_trace_dump(hw, tb, skb, cb); + break; + default: + result = -EINVAL; + break; + } + + mutex_unlock(&priv->mutex); + return result; +} diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.h b/drivers/net/wireless/iwlwifi/iwl-testmode.h index a88085e9b36..b980bda4b0f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-testmode.h +++ b/drivers/net/wireless/iwlwifi/iwl-testmode.h @@ -66,120 +66,161 @@ #include <linux/types.h> -/* Commands from user space to kernel space(IWL_TM_CMD_ID_APP2DEV_XX) and +/* + * Commands from user space to kernel space(IWL_TM_CMD_ID_APP2DEV_XX) and * from and kernel space to user space(IWL_TM_CMD_ID_DEV2APP_XX). - * The command ID is carried with IWL_TM_ATTR_COMMAND. There are three types of - * of command from user space and two types of command from kernel space. - * See below. + * The command ID is carried with IWL_TM_ATTR_COMMAND. 
+ * + * @IWL_TM_CMD_APP2DEV_UCODE: + * commands from user application to the uCode, + * the actual uCode host command ID is carried with + * IWL_TM_ATTR_UCODE_CMD_ID + * + * @IWL_TM_CMD_APP2DEV_REG_READ32: + * @IWL_TM_CMD_APP2DEV_REG_WRITE32: + * @IWL_TM_CMD_APP2DEV_REG_WRITE8: + * commands from user applicaiton to access register + * + * @IWL_TM_CMD_APP2DEV_GET_DEVICENAME: retrieve device name + * @IWL_TM_CMD_APP2DEV_LOAD_INIT_FW: load initial uCode image + * @IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB: perform calibration + * @IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW: load runtime uCode image + * @IWL_TM_CMD_APP2DEV_GET_EEPROM: request EEPROM data + * @IWL_TM_CMD_APP2DEV_FIXRATE_REQ: set fix MCS + * commands fom user space for pure driver level operations + * + * @IWL_TM_CMD_APP2DEV_BEGIN_TRACE: + * @IWL_TM_CMD_APP2DEV_END_TRACE: + * @IWL_TM_CMD_APP2DEV_READ_TRACE: + * commands fom user space for uCode trace operations + * + * @IWL_TM_CMD_DEV2APP_SYNC_RSP: + * commands from kernel space to carry the synchronous response + * to user application + * @IWL_TM_CMD_DEV2APP_UCODE_RX_PKT: + * commands from kernel space to multicast the spontaneous messages + * to user application + * @IWL_TM_CMD_DEV2APP_EEPROM_RSP: + * commands from kernel space to carry the eeprom response + * to user application + * @IWL_TM_CMD_APP2DEV_OWNERSHIP: + * commands from user application to own change the ownership of the uCode + * if application has the ownership, the only host command from + * testmode will deliver to uCode. Default owner is driver */ enum iwl_tm_cmd_t { - /* commands from user application to the uCode, - * the actual uCode host command ID is carried with - * IWL_TM_ATTR_UCODE_CMD_ID */ - IWL_TM_CMD_APP2DEV_UCODE = 1, - - /* commands from user applicaiton to access register */ - IWL_TM_CMD_APP2DEV_REG_READ32, - IWL_TM_CMD_APP2DEV_REG_WRITE32, - IWL_TM_CMD_APP2DEV_REG_WRITE8, - - /* commands fom user space for pure driver level operations */ - IWL_TM_CMD_APP2DEV_GET_DEVICENAME, - IWL_TM_CMD_APP2DEV_LOAD_INIT_FW, - IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB, - IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW, - IWL_TM_CMD_APP2DEV_GET_EEPROM, - IWL_TM_CMD_APP2DEV_FIXRATE_REQ, - /* if there is other new command for the driver layer operation, - * append them here */ - - /* commands fom user space for uCode trace operations */ - IWL_TM_CMD_APP2DEV_BEGIN_TRACE, - IWL_TM_CMD_APP2DEV_END_TRACE, - IWL_TM_CMD_APP2DEV_READ_TRACE, - - /* commands from kernel space to carry the synchronous response - * to user application */ - IWL_TM_CMD_DEV2APP_SYNC_RSP, - - /* commands from kernel space to multicast the spontaneous messages - * to user application */ - IWL_TM_CMD_DEV2APP_UCODE_RX_PKT, - - /* commands from kernel space to carry the eeprom response - * to user application */ - IWL_TM_CMD_DEV2APP_EEPROM_RSP, - - IWL_TM_CMD_MAX, + IWL_TM_CMD_APP2DEV_UCODE = 1, + IWL_TM_CMD_APP2DEV_REG_READ32 = 2, + IWL_TM_CMD_APP2DEV_REG_WRITE32 = 3, + IWL_TM_CMD_APP2DEV_REG_WRITE8 = 4, + IWL_TM_CMD_APP2DEV_GET_DEVICENAME = 5, + IWL_TM_CMD_APP2DEV_LOAD_INIT_FW = 6, + IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB = 7, + IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW = 8, + IWL_TM_CMD_APP2DEV_GET_EEPROM = 9, + IWL_TM_CMD_APP2DEV_FIXRATE_REQ = 10, + IWL_TM_CMD_APP2DEV_BEGIN_TRACE = 11, + IWL_TM_CMD_APP2DEV_END_TRACE = 12, + IWL_TM_CMD_APP2DEV_READ_TRACE = 13, + IWL_TM_CMD_DEV2APP_SYNC_RSP = 14, + IWL_TM_CMD_DEV2APP_UCODE_RX_PKT = 15, + IWL_TM_CMD_DEV2APP_EEPROM_RSP = 16, + IWL_TM_CMD_APP2DEV_OWNERSHIP = 17, + IWL_TM_CMD_MAX = 18, }; +/* + * Atrribute filed in testmode command + * See enum 
iwl_tm_cmd_t. + * + * @IWL_TM_ATTR_NOT_APPLICABLE: + * The attribute is not applicable or invalid + * @IWL_TM_ATTR_COMMAND: + * From user space to kernel space: + * the command either destines to ucode, driver, or register; + * From kernel space to user space: + * the command either carries synchronous response, + * or the spontaneous message multicast from the device; + * + * @IWL_TM_ATTR_UCODE_CMD_ID: + * @IWL_TM_ATTR_UCODE_CMD_DATA: + * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_UCODE, + * The mandatory fields are : + * IWL_TM_ATTR_UCODE_CMD_ID for recognizable command ID; + * IWL_TM_ATTR_COMMAND_FLAG for the flags of the commands; + * The optional fields are: + * IWL_TM_ATTR_UCODE_CMD_DATA for the actual command payload + * to the ucode + * + * @IWL_TM_ATTR_REG_OFFSET: + * @IWL_TM_ATTR_REG_VALUE8: + * @IWL_TM_ATTR_REG_VALUE32: + * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_REG_XXX, + * The mandatory fields are: + * IWL_TM_ATTR_REG_OFFSET for the offset of the target register; + * IWL_TM_ATTR_REG_VALUE8 or IWL_TM_ATTR_REG_VALUE32 for value + * + * @IWL_TM_ATTR_SYNC_RSP: + * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_SYNC_RSP, + * The mandatory fields are: + * IWL_TM_ATTR_SYNC_RSP for the data content responding to the user + * application command + * + * @IWL_TM_ATTR_UCODE_RX_PKT: + * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_UCODE_RX_PKT, + * The mandatory fields are: + * IWL_TM_ATTR_UCODE_RX_PKT for the data content multicast to the user + * application + * + * @IWL_TM_ATTR_EEPROM: + * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_EEPROM, + * The mandatory fields are: + * IWL_TM_ATTR_EEPROM for the data content responging to the user + * application + * + * @IWL_TM_ATTR_TRACE_ADDR: + * @IWL_TM_ATTR_TRACE_SIZE: + * @IWL_TM_ATTR_TRACE_DUMP: + * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_XXX_TRACE, + * The mandatory fields are: + * IWL_TM_ATTR_MEM_TRACE_ADDR for the trace address + * IWL_TM_ATTR_MEM_TRACE_SIZE for the trace buffer size + * IWL_TM_ATTR_MEM_TRACE_DUMP for the trace dump + * + * @IWL_TM_ATTR_FIXRATE: + * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_FIXRATE_REQ, + * The mandatory fields are: + * IWL_TM_ATTR_FIXRATE for the fixed rate + * + * @IWL_TM_ATTR_UCODE_OWNER: + * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_OWNERSHIP, + * The mandatory fields are: + * IWL_TM_ATTR_UCODE_OWNER for the new owner + */ enum iwl_tm_attr_t { - IWL_TM_ATTR_NOT_APPLICABLE = 0, - - /* From user space to kernel space: - * the command either destines to ucode, driver, or register; - * See enum iwl_tm_cmd_t. - * - * From kernel space to user space: - * the command either carries synchronous response, - * or the spontaneous message multicast from the device; - * See enum iwl_tm_cmd_t. 
*/ - IWL_TM_ATTR_COMMAND, - - /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_UCODE, - * The mandatory fields are : - * IWL_TM_ATTR_UCODE_CMD_ID for recognizable command ID; - * IWL_TM_ATTR_COMMAND_FLAG for the flags of the commands; - * The optional fields are: - * IWL_TM_ATTR_UCODE_CMD_DATA for the actual command payload - * to the ucode */ - IWL_TM_ATTR_UCODE_CMD_ID, - IWL_TM_ATTR_UCODE_CMD_DATA, - - /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_REG_XXX, - * The mandatory fields are: - * IWL_TM_ATTR_REG_OFFSET for the offset of the target register; - * IWL_TM_ATTR_REG_VALUE8 or IWL_TM_ATTR_REG_VALUE32 for value */ - IWL_TM_ATTR_REG_OFFSET, - IWL_TM_ATTR_REG_VALUE8, - IWL_TM_ATTR_REG_VALUE32, - - /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_SYNC_RSP, - * The mandatory fields are: - * IWL_TM_ATTR_SYNC_RSP for the data content responding to the user - * application command */ - IWL_TM_ATTR_SYNC_RSP, - /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_UCODE_RX_PKT, - * The mandatory fields are: - * IWL_TM_ATTR_UCODE_RX_PKT for the data content multicast to the user - * application */ - IWL_TM_ATTR_UCODE_RX_PKT, - - /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_EEPROM, - * The mandatory fields are: - * IWL_TM_ATTR_EEPROM for the data content responging to the user - * application */ - IWL_TM_ATTR_EEPROM, - - /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_XXX_TRACE, - * The mandatory fields are: - * IWL_TM_ATTR_MEM_TRACE_ADDR for the trace address - */ - IWL_TM_ATTR_TRACE_ADDR, - IWL_TM_ATTR_TRACE_DATA, - - /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_FIXRATE_REQ, - * The mandatory fields are: - * IWL_TM_ATTR_FIXRATE for the fixed rate - */ - IWL_TM_ATTR_FIXRATE, - - IWL_TM_ATTR_MAX, + IWL_TM_ATTR_NOT_APPLICABLE = 0, + IWL_TM_ATTR_COMMAND = 1, + IWL_TM_ATTR_UCODE_CMD_ID = 2, + IWL_TM_ATTR_UCODE_CMD_DATA = 3, + IWL_TM_ATTR_REG_OFFSET = 4, + IWL_TM_ATTR_REG_VALUE8 = 5, + IWL_TM_ATTR_REG_VALUE32 = 6, + IWL_TM_ATTR_SYNC_RSP = 7, + IWL_TM_ATTR_UCODE_RX_PKT = 8, + IWL_TM_ATTR_EEPROM = 9, + IWL_TM_ATTR_TRACE_ADDR = 10, + IWL_TM_ATTR_TRACE_SIZE = 11, + IWL_TM_ATTR_TRACE_DUMP = 12, + IWL_TM_ATTR_FIXRATE = 13, + IWL_TM_ATTR_UCODE_OWNER = 14, + IWL_TM_ATTR_MAX = 15, }; /* uCode trace buffer */ -#define TRACE_BUFF_SIZE 0x20000 +#define TRACE_BUFF_SIZE_MAX 0x200000 +#define TRACE_BUFF_SIZE_MIN 0x20000 +#define TRACE_BUFF_SIZE_DEF TRACE_BUFF_SIZE_MIN #define TRACE_BUFF_PADD 0x2000 -#define TRACE_TOTAL_SIZE (TRACE_BUFF_SIZE + TRACE_BUFF_PADD) +#define TRACE_CHUNK_SIZE (PAGE_SIZE - 1024) #endif diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h b/drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h new file mode 100644 index 00000000000..b79330d8418 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h @@ -0,0 +1,82 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#ifndef __iwl_trans_int_pcie_h__ +#define __iwl_trans_int_pcie_h__ + +/*This file includes the declaration that are internal to the + * trans_pcie layer */ + +/***************************************************** +* RX +******************************************************/ +void iwl_bg_rx_replenish(struct work_struct *data); +void iwl_irq_tasklet(struct iwl_priv *priv); +void iwlagn_rx_replenish(struct iwl_priv *priv); +void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, + struct iwl_rx_queue *q); + +/***************************************************** +* ICT +******************************************************/ +int iwl_reset_ict(struct iwl_priv *priv); +void iwl_disable_ict(struct iwl_priv *priv); +int iwl_alloc_isr_ict(struct iwl_priv *priv); +void iwl_free_isr_ict(struct iwl_priv *priv); +irqreturn_t iwl_isr_ict(int irq, void *data); + + +/***************************************************** +* TX / HCMD +******************************************************/ +void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); +void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq, + int index); +int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + dma_addr_t addr, u16 len, u8 reset); +int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q, + int count, int slots_num, u32 id); +int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd); +int __must_check iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u32 flags, + u16 len, const void *data); +void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); +void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + u16 byte_cnt); +int iwl_trans_txq_agg_disable(struct iwl_priv *priv, u16 txq_id, + u16 ssn_idx, u8 tx_fifo); +void iwl_trans_set_wr_ptrs(struct iwl_priv *priv, + int txq_id, u32 index); +void iwl_trans_tx_queue_set_status(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + int tx_fifo_id, int scd_retry); +void iwl_trans_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid, + int frame_limit); + +#endif /* __iwl_trans_int_pcie_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c new file mode 100644 index 00000000000..47486029040 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c @@ -0,0 +1,979 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/gfp.h> + +#include "iwl-dev.h" +#include "iwl-agn.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-helpers.h" +#include "iwl-trans-int-pcie.h" + +/****************************************************************************** + * + * RX path functions + * + ******************************************************************************/ + +/* + * Rx theory of operation + * + * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), + * each of which point to Receive Buffers to be filled by the NIC. These get + * used not only for Rx frames, but for any command response or notification + * from the NIC. The driver and NIC manage the Rx buffers by means + * of indexes into the circular buffer. + * + * Rx Queue Indexes + * The host/firmware share two index registers for managing the Rx buffers. + * + * The READ index maps to the first position that the firmware may be writing + * to -- the driver can read up to (but not including) this position and get + * good data. + * The READ index is managed by the firmware once the card is enabled. + * + * The WRITE index maps to the last position the driver has read from -- the + * position preceding WRITE is the last slot the firmware can place a packet. + * + * The queue is empty (no good data) if WRITE = READ - 1, and is full if + * WRITE = READ. + * + * During initialization, the host sets up the READ queue position to the first + * INDEX position, and WRITE to the last (READ - 1 wrapped) + * + * When the firmware places a packet in a buffer, it will advance the READ index + * and fire the RX interrupt. The driver can then query the READ index and + * process as many packets as possible, moving the WRITE index forward as it + * resets the Rx queue buffers with new memory. + * + * The management in the driver is as follows: + * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When + * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled + * to replenish the iwl->rxq->rx_free. + * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the + * iwl->rxq is replenished and the READ INDEX is updated (updating the + * 'processed' and 'read' driver indexes as well) + * + A received packet is processed and handed to the kernel network stack, + * detached from the iwl->rxq. The driver 'processed' index is updated. + * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free + * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ + * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there + * were enough free buffers and RX_STALLED is set it is cleared. 
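To make the index arithmetic in the theory-of-operation comment above concrete, here is a tiny standalone sketch (not part of the patch) of the free-slot computation performed by the iwl_rx_queue_space() helper added below; the 256-entry ring and the two guard slots mirror the driver code, while the demo_ names and the main() harness are invented for illustration.

#include <stdio.h>

#define RX_QUEUE_SIZE 256

/* Free slots the driver may still refill.  Two slots are kept unused so
 * that "write == read" can only ever mean an empty ring, never a full one. */
static int demo_rx_queue_space(int read, int write)
{
	int s = read - write;

	if (s <= 0)
		s += RX_QUEUE_SIZE;
	s -= 2;			/* guard slots */
	if (s < 0)
		s = 0;
	return s;
}

int main(void)
{
	printf("fresh ring: %d free slots\n", demo_rx_queue_space(0, 0));  /* 254 */
	printf("caught up : %d free slots\n", demo_rx_queue_space(10, 8)); /* 0 */
	return 0;
}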
+ * + * + * Driver sequence: + * + * iwl_rx_queue_alloc() Allocates rx_free + * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls + * iwl_rx_queue_restock + * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx + * queue, updates firmware pointers, and updates + * the WRITE index. If insufficient rx_free buffers + * are available, schedules iwl_rx_replenish + * + * -- enable interrupts -- + * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the + * READ INDEX, detaching the SKB from the pool. + * Moves the packet buffer from queue to rx_used. + * Calls iwl_rx_queue_restock to refill any empty + * slots. + * ... + * + */ + +/** + * iwl_rx_queue_space - Return number of free slots available in queue. + */ +static int iwl_rx_queue_space(const struct iwl_rx_queue *q) +{ + int s = q->read - q->write; + if (s <= 0) + s += RX_QUEUE_SIZE; + /* keep some buffer to not confuse full and empty queue */ + s -= 2; + if (s < 0) + s = 0; + return s; +} + +/** + * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue + */ +void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, + struct iwl_rx_queue *q) +{ + unsigned long flags; + u32 reg; + + spin_lock_irqsave(&q->lock, flags); + + if (q->need_update == 0) + goto exit_unlock; + + if (priv->cfg->base_params->shadow_reg_enable) { + /* shadow register enabled */ + /* Device expects a multiple of 8 */ + q->write_actual = (q->write & ~0x7); + iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write_actual); + } else { + /* If power-saving is in use, make sure device is awake */ + if (test_bit(STATUS_POWER_PMI, &priv->status)) { + reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); + + if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { + IWL_DEBUG_INFO(priv, + "Rx queue requesting wakeup," + " GP1 = 0x%x\n", reg); + iwl_set_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + goto exit_unlock; + } + + q->write_actual = (q->write & ~0x7); + iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR, + q->write_actual); + + /* Else device is assumed to be awake */ + } else { + /* Device expects a multiple of 8 */ + q->write_actual = (q->write & ~0x7); + iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR, + q->write_actual); + } + } + q->need_update = 0; + + exit_unlock: + spin_unlock_irqrestore(&q->lock, flags); +} + +/** + * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr + */ +static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv, + dma_addr_t dma_addr) +{ + return cpu_to_le32((u32)(dma_addr >> 8)); +} + +/** + * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool + * + * If there are slots in the RX queue that need to be restocked, + * and we have free pre-allocated buffers, fill the ranks as much + * as we can, pulling from rx_free. + * + * This moves the 'write' index forward to catch up with 'processed', and + * also updates the memory address in the firmware to reference the new + * target buffer. 
+ */ +static void iwlagn_rx_queue_restock(struct iwl_priv *priv) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + struct list_head *element; + struct iwl_rx_mem_buffer *rxb; + unsigned long flags; + + spin_lock_irqsave(&rxq->lock, flags); + while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) { + /* The overwritten rxb must be a used one */ + rxb = rxq->queue[rxq->write]; + BUG_ON(rxb && rxb->page); + + /* Get next free Rx buffer, remove from free list */ + element = rxq->rx_free.next; + rxb = list_entry(element, struct iwl_rx_mem_buffer, list); + list_del(element); + + /* Point to Rx buffer via next RBD in circular buffer */ + rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv, + rxb->page_dma); + rxq->queue[rxq->write] = rxb; + rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; + rxq->free_count--; + } + spin_unlock_irqrestore(&rxq->lock, flags); + /* If the pre-allocated buffer pool is dropping low, schedule to + * refill it */ + if (rxq->free_count <= RX_LOW_WATERMARK) + queue_work(priv->workqueue, &priv->rx_replenish); + + + /* If we've added more space for the firmware to place data, tell it. + * Increment device's write pointer in multiples of 8. */ + if (rxq->write_actual != (rxq->write & ~0x7)) { + spin_lock_irqsave(&rxq->lock, flags); + rxq->need_update = 1; + spin_unlock_irqrestore(&rxq->lock, flags); + iwl_rx_queue_update_write_ptr(priv, rxq); + } +} + +/** + * iwlagn_rx_replenish - Move all used packet from rx_used to rx_free + * + * When moving to rx_free an SKB is allocated for the slot. + * + * Also restock the Rx queue via iwl_rx_queue_restock. + * This is called as a scheduled work item (except for during initialization) + */ +static void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + struct list_head *element; + struct iwl_rx_mem_buffer *rxb; + struct page *page; + unsigned long flags; + gfp_t gfp_mask = priority; + + while (1) { + spin_lock_irqsave(&rxq->lock, flags); + if (list_empty(&rxq->rx_used)) { + spin_unlock_irqrestore(&rxq->lock, flags); + return; + } + spin_unlock_irqrestore(&rxq->lock, flags); + + if (rxq->free_count > RX_LOW_WATERMARK) + gfp_mask |= __GFP_NOWARN; + + if (priv->hw_params.rx_page_order > 0) + gfp_mask |= __GFP_COMP; + + /* Alloc a new receive buffer */ + page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order); + if (!page) { + if (net_ratelimit()) + IWL_DEBUG_INFO(priv, "alloc_pages failed, " + "order: %d\n", + priv->hw_params.rx_page_order); + + if ((rxq->free_count <= RX_LOW_WATERMARK) && + net_ratelimit()) + IWL_CRIT(priv, "Failed to alloc_pages with %s." + "Only %u free buffers remaining.\n", + priority == GFP_ATOMIC ? 
+ "GFP_ATOMIC" : "GFP_KERNEL", + rxq->free_count); + /* We don't reschedule replenish work here -- we will + * call the restock method and if it still needs + * more buffers it will schedule replenish */ + return; + } + + spin_lock_irqsave(&rxq->lock, flags); + + if (list_empty(&rxq->rx_used)) { + spin_unlock_irqrestore(&rxq->lock, flags); + __free_pages(page, priv->hw_params.rx_page_order); + return; + } + element = rxq->rx_used.next; + rxb = list_entry(element, struct iwl_rx_mem_buffer, list); + list_del(element); + + spin_unlock_irqrestore(&rxq->lock, flags); + + BUG_ON(rxb->page); + rxb->page = page; + /* Get physical address of the RB */ + rxb->page_dma = dma_map_page(priv->bus->dev, page, 0, + PAGE_SIZE << priv->hw_params.rx_page_order, + DMA_FROM_DEVICE); + /* dma address must be no more than 36 bits */ + BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); + /* and also 256 byte aligned! */ + BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); + + spin_lock_irqsave(&rxq->lock, flags); + + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + + spin_unlock_irqrestore(&rxq->lock, flags); + } +} + +void iwlagn_rx_replenish(struct iwl_priv *priv) +{ + unsigned long flags; + + iwlagn_rx_allocate(priv, GFP_KERNEL); + + spin_lock_irqsave(&priv->lock, flags); + iwlagn_rx_queue_restock(priv); + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void iwlagn_rx_replenish_now(struct iwl_priv *priv) +{ + iwlagn_rx_allocate(priv, GFP_ATOMIC); + + iwlagn_rx_queue_restock(priv); +} + +void iwl_bg_rx_replenish(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, rx_replenish); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwlagn_rx_replenish(priv); + mutex_unlock(&priv->mutex); +} + +/** + * iwl_rx_handle - Main entry function for receiving responses from uCode + * + * Uses the priv->rx_handlers callback function array to invoke + * the appropriate handlers, including command responses, + * frame-received notifications, and other notifications. + */ +static void iwl_rx_handle(struct iwl_priv *priv) +{ + struct iwl_rx_mem_buffer *rxb; + struct iwl_rx_packet *pkt; + struct iwl_rx_queue *rxq = &priv->rxq; + u32 r, i; + int reclaim; + unsigned long flags; + u8 fill_rx = 0; + u32 count = 8; + int total_empty; + + /* uCode's read index (stored in shared DRAM) indicates the last Rx + * buffer that the driver may process (last buffer filled by ucode). 
*/ + r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF; + i = rxq->read; + + /* Rx interrupt, but nothing sent from uCode */ + if (i == r) + IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i); + + /* calculate total frames need to be restock after handling RX */ + total_empty = r - rxq->write_actual; + if (total_empty < 0) + total_empty += RX_QUEUE_SIZE; + + if (total_empty > (RX_QUEUE_SIZE / 2)) + fill_rx = 1; + + while (i != r) { + int len; + + rxb = rxq->queue[i]; + + /* If an RXB doesn't have a Rx queue slot associated with it, + * then a bug has been introduced in the queue refilling + * routines -- catch it here */ + if (WARN_ON(rxb == NULL)) { + i = (i + 1) & RX_QUEUE_MASK; + continue; + } + + rxq->queue[i] = NULL; + + dma_unmap_page(priv->bus->dev, rxb->page_dma, + PAGE_SIZE << priv->hw_params.rx_page_order, + DMA_FROM_DEVICE); + pkt = rxb_addr(rxb); + + IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, + i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); + + len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; + len += sizeof(u32); /* account for status word */ + trace_iwlwifi_dev_rx(priv, pkt, len); + + /* Reclaim a command buffer only if this packet is a response + * to a (driver-originated) command. + * If the packet (e.g. Rx frame) originated from uCode, + * there is no command buffer to reclaim. + * Ucode should set SEQ_RX_FRAME bit if ucode-originated, + * but apparently a few don't get set; catch them here. */ + reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && + (pkt->hdr.cmd != REPLY_RX_PHY_CMD) && + (pkt->hdr.cmd != REPLY_RX) && + (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) && + (pkt->hdr.cmd != REPLY_COMPRESSED_BA) && + (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && + (pkt->hdr.cmd != REPLY_TX); + + iwl_rx_dispatch(priv, rxb); + + /* + * XXX: After here, we should always check rxb->page + * against NULL before touching it or its virtual + * memory (pkt). Because some rx_handler might have + * already taken or freed the pages. + */ + + if (reclaim) { + /* Invoke any callbacks, transfer the buffer to caller, + * and fire off the (possibly) blocking + * trans_send_cmd() + * as we reclaim the driver command queue */ + if (rxb->page) + iwl_tx_cmd_complete(priv, rxb); + else + IWL_WARN(priv, "Claim null rxb?\n"); + } + + /* Reuse the page if possible. For notification packets and + * SKBs that fail to Rx correctly, add them back into the + * rx_free list for reuse later. */ + spin_lock_irqsave(&rxq->lock, flags); + if (rxb->page != NULL) { + rxb->page_dma = dma_map_page(priv->bus->dev, rxb->page, + 0, PAGE_SIZE << priv->hw_params.rx_page_order, + DMA_FROM_DEVICE); + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + } else + list_add_tail(&rxb->list, &rxq->rx_used); + + spin_unlock_irqrestore(&rxq->lock, flags); + + i = (i + 1) & RX_QUEUE_MASK; + /* If there are a lot of unused frames, + * restock the Rx queue so ucode wont assert. */ + if (fill_rx) { + count++; + if (count >= 8) { + rxq->read = i; + iwlagn_rx_replenish_now(priv); + count = 0; + } + } + } + + /* Backtrack one entry */ + rxq->read = i; + if (fill_rx) + iwlagn_rx_replenish_now(priv); + else + iwlagn_rx_queue_restock(priv); +} + +/* tasklet for iwlagn interrupt */ +void iwl_irq_tasklet(struct iwl_priv *priv) +{ + u32 inta = 0; + u32 handled = 0; + unsigned long flags; + u32 i; +#ifdef CONFIG_IWLWIFI_DEBUG + u32 inta_mask; +#endif + + spin_lock_irqsave(&priv->lock, flags); + + /* Ack/clear/reset pending uCode interrupts. 
+ * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, + */ + /* There is a hardware bug in the interrupt mask function that some + * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if + * they are disabled in the CSR_INT_MASK register. Furthermore the + * ICT interrupt handling mechanism has another bug that might cause + * these unmasked interrupts fail to be detected. We workaround the + * hardware bugs here by ACKing all the possible interrupts so that + * interrupt coalescing can still be achieved. + */ + iwl_write32(priv, CSR_INT, priv->inta | ~priv->inta_mask); + + inta = priv->inta; + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_get_debug_level(priv) & IWL_DL_ISR) { + /* just for debug */ + inta_mask = iwl_read32(priv, CSR_INT_MASK); + IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x\n ", + inta, inta_mask); + } +#endif + + spin_unlock_irqrestore(&priv->lock, flags); + + /* saved interrupt in inta variable now we can reset priv->inta */ + priv->inta = 0; + + /* Now service all interrupt bits discovered above. */ + if (inta & CSR_INT_BIT_HW_ERR) { + IWL_ERR(priv, "Hardware error detected. Restarting.\n"); + + /* Tell the device to stop sending interrupts */ + iwl_disable_interrupts(priv); + + priv->isr_stats.hw++; + iwl_irq_handle_error(priv); + + handled |= CSR_INT_BIT_HW_ERR; + + return; + } + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { + /* NIC fires this, but we don't use it, redundant with WAKEUP */ + if (inta & CSR_INT_BIT_SCD) { + IWL_DEBUG_ISR(priv, "Scheduler finished to transmit " + "the frame/frames.\n"); + priv->isr_stats.sch++; + } + + /* Alive notification via Rx interrupt will do the real work */ + if (inta & CSR_INT_BIT_ALIVE) { + IWL_DEBUG_ISR(priv, "Alive interrupt\n"); + priv->isr_stats.alive++; + } + } +#endif + /* Safely ignore these bits for debug checks below */ + inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); + + /* HW RF KILL switch toggled */ + if (inta & CSR_INT_BIT_RF_KILL) { + int hw_rf_kill = 0; + if (!(iwl_read32(priv, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) + hw_rf_kill = 1; + + IWL_WARN(priv, "RF_KILL bit toggled to %s.\n", + hw_rf_kill ? "disable radio" : "enable radio"); + + priv->isr_stats.rfkill++; + + /* driver only loads ucode once setting the interface up. + * the driver allows loading the ucode even if the radio + * is killed. Hence update the killswitch state here. The + * rfkill handler will care about restarting if needed. + */ + if (!test_bit(STATUS_ALIVE, &priv->status)) { + if (hw_rf_kill) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill); + } + + handled |= CSR_INT_BIT_RF_KILL; + } + + /* Chip got too hot and stopped itself */ + if (inta & CSR_INT_BIT_CT_KILL) { + IWL_ERR(priv, "Microcode CT kill error detected.\n"); + priv->isr_stats.ctkill++; + handled |= CSR_INT_BIT_CT_KILL; + } + + /* Error detected by uCode */ + if (inta & CSR_INT_BIT_SW_ERR) { + IWL_ERR(priv, "Microcode SW error detected. 
" + " Restarting 0x%X.\n", inta); + priv->isr_stats.sw++; + iwl_irq_handle_error(priv); + handled |= CSR_INT_BIT_SW_ERR; + } + + /* uCode wakes up after power-down sleep */ + if (inta & CSR_INT_BIT_WAKEUP) { + IWL_DEBUG_ISR(priv, "Wakeup interrupt\n"); + iwl_rx_queue_update_write_ptr(priv, &priv->rxq); + for (i = 0; i < priv->hw_params.max_txq_num; i++) + iwl_txq_update_write_ptr(priv, &priv->txq[i]); + + priv->isr_stats.wakeup++; + + handled |= CSR_INT_BIT_WAKEUP; + } + + /* All uCode command responses, including Tx command responses, + * Rx "responses" (frame-received notification), and other + * notifications from uCode come through here*/ + if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX | + CSR_INT_BIT_RX_PERIODIC)) { + IWL_DEBUG_ISR(priv, "Rx interrupt\n"); + if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { + handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); + iwl_write32(priv, CSR_FH_INT_STATUS, + CSR_FH_INT_RX_MASK); + } + if (inta & CSR_INT_BIT_RX_PERIODIC) { + handled |= CSR_INT_BIT_RX_PERIODIC; + iwl_write32(priv, CSR_INT, CSR_INT_BIT_RX_PERIODIC); + } + /* Sending RX interrupt require many steps to be done in the + * the device: + * 1- write interrupt to current index in ICT table. + * 2- dma RX frame. + * 3- update RX shared data to indicate last write index. + * 4- send interrupt. + * This could lead to RX race, driver could receive RX interrupt + * but the shared data changes does not reflect this; + * periodic interrupt will detect any dangling Rx activity. + */ + + /* Disable periodic interrupt; we use it as just a one-shot. */ + iwl_write8(priv, CSR_INT_PERIODIC_REG, + CSR_INT_PERIODIC_DIS); + iwl_rx_handle(priv); + + /* + * Enable periodic interrupt in 8 msec only if we received + * real RX interrupt (instead of just periodic int), to catch + * any dangling Rx interrupt. If it was just the periodic + * interrupt, there was no dangling Rx activity, and no need + * to extend the periodic interrupt; one-shot is enough. 
+ */ + if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) + iwl_write8(priv, CSR_INT_PERIODIC_REG, + CSR_INT_PERIODIC_ENA); + + priv->isr_stats.rx++; + } + + /* This "Tx" DMA channel is used only for loading uCode */ + if (inta & CSR_INT_BIT_FH_TX) { + iwl_write32(priv, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK); + IWL_DEBUG_ISR(priv, "uCode load interrupt\n"); + priv->isr_stats.tx++; + handled |= CSR_INT_BIT_FH_TX; + /* Wake up uCode load routine, now that load is complete */ + priv->ucode_write_complete = 1; + wake_up_interruptible(&priv->wait_command_queue); + } + + if (inta & ~handled) { + IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled); + priv->isr_stats.unhandled++; + } + + if (inta & ~(priv->inta_mask)) { + IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n", + inta & ~priv->inta_mask); + } + + /* Re-enable all interrupts */ + /* only Re-enable if disabled by irq */ + if (test_bit(STATUS_INT_ENABLED, &priv->status)) + iwl_enable_interrupts(priv); + /* Re-enable RF_KILL if it occurred */ + else if (handled & CSR_INT_BIT_RF_KILL) + iwl_enable_rfkill_int(priv); +} + +/****************************************************************************** + * + * ICT functions + * + ******************************************************************************/ +#define ICT_COUNT (PAGE_SIZE/sizeof(u32)) + +/* Free dram table */ +void iwl_free_isr_ict(struct iwl_priv *priv) +{ + if (priv->ict_tbl_vir) { + dma_free_coherent(priv->bus->dev, + (sizeof(u32) * ICT_COUNT) + PAGE_SIZE, + priv->ict_tbl_vir, + priv->ict_tbl_dma); + priv->ict_tbl_vir = NULL; + memset(&priv->ict_tbl_dma, 0, + sizeof(priv->ict_tbl_dma)); + memset(&priv->aligned_ict_tbl_dma, 0, + sizeof(priv->aligned_ict_tbl_dma)); + } +} + + +/* allocate dram shared table it is a PAGE_SIZE aligned + * also reset all data related to ICT table interrupt. + */ +int iwl_alloc_isr_ict(struct iwl_priv *priv) +{ + + /* allocate shrared data table */ + priv->ict_tbl_vir = + dma_alloc_coherent(priv->bus->dev, + (sizeof(u32) * ICT_COUNT) + PAGE_SIZE, + &priv->ict_tbl_dma, GFP_KERNEL); + if (!priv->ict_tbl_vir) + return -ENOMEM; + + /* align table to PAGE_SIZE boundary */ + priv->aligned_ict_tbl_dma = + ALIGN(priv->ict_tbl_dma, PAGE_SIZE); + + IWL_DEBUG_ISR(priv, "ict dma addr %Lx dma aligned %Lx diff %d\n", + (unsigned long long)priv->ict_tbl_dma, + (unsigned long long)priv->aligned_ict_tbl_dma, + (int)(priv->aligned_ict_tbl_dma - + priv->ict_tbl_dma)); + + priv->ict_tbl = priv->ict_tbl_vir + + (priv->aligned_ict_tbl_dma - + priv->ict_tbl_dma); + + IWL_DEBUG_ISR(priv, "ict vir addr %p vir aligned %p diff %d\n", + priv->ict_tbl, priv->ict_tbl_vir, + (int)(priv->aligned_ict_tbl_dma - + priv->ict_tbl_dma)); + + /* reset table and index to all 0 */ + memset(priv->ict_tbl_vir, 0, + (sizeof(u32) * ICT_COUNT) + PAGE_SIZE); + priv->ict_index = 0; + + /* add periodic RX interrupt */ + priv->inta_mask |= CSR_INT_BIT_RX_PERIODIC; + return 0; +} + +/* Device is going up inform it about using ICT interrupt table, + * also we need to tell the driver to start using ICT interrupt. 
+ */ +int iwl_reset_ict(struct iwl_priv *priv) +{ + u32 val; + unsigned long flags; + + if (!priv->ict_tbl_vir) + return 0; + + spin_lock_irqsave(&priv->lock, flags); + iwl_disable_interrupts(priv); + + memset(&priv->ict_tbl[0], 0, sizeof(u32) * ICT_COUNT); + + val = priv->aligned_ict_tbl_dma >> PAGE_SHIFT; + + val |= CSR_DRAM_INT_TBL_ENABLE; + val |= CSR_DRAM_INIT_TBL_WRAP_CHECK; + + IWL_DEBUG_ISR(priv, "CSR_DRAM_INT_TBL_REG =0x%X " + "aligned dma address %Lx\n", + val, + (unsigned long long)priv->aligned_ict_tbl_dma); + + iwl_write32(priv, CSR_DRAM_INT_TBL_REG, val); + priv->use_ict = true; + priv->ict_index = 0; + iwl_write32(priv, CSR_INT, priv->inta_mask); + iwl_enable_interrupts(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +/* Device is going down disable ict interrupt usage */ +void iwl_disable_ict(struct iwl_priv *priv) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + priv->use_ict = false; + spin_unlock_irqrestore(&priv->lock, flags); +} + +static irqreturn_t iwl_isr(int irq, void *data) +{ + struct iwl_priv *priv = data; + u32 inta, inta_mask; + unsigned long flags; +#ifdef CONFIG_IWLWIFI_DEBUG + u32 inta_fh; +#endif + if (!priv) + return IRQ_NONE; + + spin_lock_irqsave(&priv->lock, flags); + + /* Disable (but don't clear!) interrupts here to avoid + * back-to-back ISRs and sporadic interrupts from our NIC. + * If we have something to service, the tasklet will re-enable ints. + * If we *don't* have something, we'll re-enable before leaving here. */ + inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */ + iwl_write32(priv, CSR_INT_MASK, 0x00000000); + + /* Discover which interrupts are active/pending */ + inta = iwl_read32(priv, CSR_INT); + + /* Ignore interrupt if there's nothing in NIC to service. + * This may be due to IRQ shared with another device, + * or due to sporadic interrupts thrown from our NIC. */ + if (!inta) { + IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n"); + goto none; + } + + if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { + /* Hardware disappeared. It might have already raised + * an interrupt */ + IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta); + goto unplugged; + } + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, " + "fh 0x%08x\n", inta, inta_mask, inta_fh); + } +#endif + + priv->inta |= inta; + /* iwl_irq_tasklet() will service interrupts and re-enable them */ + if (likely(inta)) + tasklet_schedule(&priv->irq_tasklet); + else if (test_bit(STATUS_INT_ENABLED, &priv->status) && + !priv->inta) + iwl_enable_interrupts(priv); + + unplugged: + spin_unlock_irqrestore(&priv->lock, flags); + return IRQ_HANDLED; + + none: + /* re-enable interrupts here since we don't have anything to service. */ + /* only Re-enable if disabled by irq and no schedules tasklet. */ + if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta) + iwl_enable_interrupts(priv); + + spin_unlock_irqrestore(&priv->lock, flags); + return IRQ_NONE; +} + +/* interrupt handler using ict table, with this interrupt driver will + * stop using INTA register to get device's interrupt, reading this register + * is expensive, device will write interrupts in ICT dram table, increment + * index then will fire interrupt to driver, driver will OR all ICT table + * entries from current index up to table entry with 0 value. 
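As a standalone illustration (not part of the patch) of the bit folding the ICT interrupt handler below performs once it has OR'ed the non-zero table entries together: the masks and the bits-18/19 workaround are taken from the code in this hunk, whereas the demo_ helper and the sample values are invented.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

static uint32_t demo_ict_to_inta(uint32_t val)
{
	/* h/w quirk worked around in the driver: the Rx bit (bit 15 here,
	 * bit 31 after folding) can be lost under interrupt coalescing,
	 * but bits 18/19 survive, so they are used to restore it */
	if (val & 0xC0000)
		val |= 0x8000;

	/* bits 0-7 stay in place, bits 8-15 become bits 24-31 */
	return (0xff & val) | ((0xff00 & val) << 16);
}

int main(void)
{
	printf("inta = 0x%08" PRIx32 "\n", demo_ict_to_inta(0x8002));
	printf("inta = 0x%08" PRIx32 "\n", demo_ict_to_inta(0x40002));
	return 0;
}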
the result is + * the interrupt we need to service, driver will set the entries back to 0 and + * set index. + */ +irqreturn_t iwl_isr_ict(int irq, void *data) +{ + struct iwl_priv *priv = data; + u32 inta, inta_mask; + u32 val = 0; + unsigned long flags; + + if (!priv) + return IRQ_NONE; + + /* dram interrupt table not set yet, + * use legacy interrupt. + */ + if (!priv->use_ict) + return iwl_isr(irq, data); + + spin_lock_irqsave(&priv->lock, flags); + + /* Disable (but don't clear!) interrupts here to avoid + * back-to-back ISRs and sporadic interrupts from our NIC. + * If we have something to service, the tasklet will re-enable ints. + * If we *don't* have something, we'll re-enable before leaving here. + */ + inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */ + iwl_write32(priv, CSR_INT_MASK, 0x00000000); + + + /* Ignore interrupt if there's nothing in NIC to service. + * This may be due to IRQ shared with another device, + * or due to sporadic interrupts thrown from our NIC. */ + if (!priv->ict_tbl[priv->ict_index]) { + IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n"); + goto none; + } + + /* read all entries that not 0 start with ict_index */ + while (priv->ict_tbl[priv->ict_index]) { + + val |= le32_to_cpu(priv->ict_tbl[priv->ict_index]); + IWL_DEBUG_ISR(priv, "ICT index %d value 0x%08X\n", + priv->ict_index, + le32_to_cpu( + priv->ict_tbl[priv->ict_index])); + priv->ict_tbl[priv->ict_index] = 0; + priv->ict_index = iwl_queue_inc_wrap(priv->ict_index, + ICT_COUNT); + + } + + /* We should not get this value, just ignore it. */ + if (val == 0xffffffff) + val = 0; + + /* + * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit + * (bit 15 before shifting it to 31) to clear when using interrupt + * coalescing. fortunately, bits 18 and 19 stay set when this happens + * so we use them to decide on the real state of the Rx bit. + * In order words, bit 15 is set if bit 18 or bit 19 are set. + */ + if (val & 0xC0000) + val |= 0x8000; + + inta = (0xff & val) | ((0xff00 & val) << 16); + IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n", + inta, inta_mask, val); + + inta &= priv->inta_mask; + priv->inta |= inta; + + /* iwl_irq_tasklet() will service interrupts and re-enable them */ + if (likely(inta)) + tasklet_schedule(&priv->irq_tasklet); + else if (test_bit(STATUS_INT_ENABLED, &priv->status) && + !priv->inta) { + /* Allow interrupt if was disabled by this handler and + * no tasklet was schedules, We should not enable interrupt, + * tasklet will enable it. + */ + iwl_enable_interrupts(priv); + } + + spin_unlock_irqrestore(&priv->lock, flags); + return IRQ_HANDLED; + + none: + /* re-enable interrupts here since we don't have anything to service. + * only Re-enable if disabled by irq. + */ + if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta) + iwl_enable_interrupts(priv); + + spin_unlock_irqrestore(&priv->lock, flags); + return IRQ_NONE; +} diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c index 137dba95b1a..a6b2b1db0b1 100644 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c @@ -26,18 +26,58 @@ * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ - #include <linux/etherdevice.h> -#include <linux/sched.h> #include <linux/slab.h> +#include <linux/sched.h> #include <net/mac80211.h> -#include "iwl-eeprom.h" + #include "iwl-agn.h" #include "iwl-dev.h" #include "iwl-core.h" -#include "iwl-sta.h" #include "iwl-io.h" #include "iwl-helpers.h" +#include "iwl-trans-int-pcie.h" + +/** + * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array + */ +void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + u16 byte_cnt) +{ + struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr; + int write_ptr = txq->q.write_ptr; + int txq_id = txq->q.id; + u8 sec_ctl = 0; + u8 sta_id = 0; + u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; + __le16 bc_ent; + + WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX); + + sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id; + sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl; + + switch (sec_ctl & TX_CMD_SEC_MSK) { + case TX_CMD_SEC_CCM: + len += CCMP_MIC_LEN; + break; + case TX_CMD_SEC_TKIP: + len += TKIP_ICV_LEN; + break; + case TX_CMD_SEC_WEP: + len += WEP_IV_LEN + WEP_ICV_LEN; + break; + } + + bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12)); + + scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; + + if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) + scd_bc_tbl[txq_id]. + tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; +} /** * iwl_txq_update_write_ptr - Send new write index to hardware @@ -126,9 +166,8 @@ static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd) } static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta, - struct iwl_tfd *tfd, int dma_dir) + struct iwl_tfd *tfd, enum dma_data_direction dma_dir) { - struct pci_dev *dev = priv->pci_dev; int i; int num_tbs; @@ -143,14 +182,14 @@ static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta, /* Unmap tx_cmd */ if (num_tbs) - pci_unmap_single(dev, + dma_unmap_single(priv->bus->dev, dma_unmap_addr(meta, mapping), dma_unmap_len(meta, len), - PCI_DMA_BIDIRECTIONAL); + DMA_BIDIRECTIONAL); /* Unmap chunks, if any. 
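As an aside, the scheduler byte-count entry written by iwl_trans_txq_update_byte_cnt_tbl() at the top of this hunk is a 16-bit pack of frame length (after the CRC, delimiter and security overhead have been added) and station id, mirrored into a duplicate window so the hardware can read a contiguous run across the wrap point. The sketch below shows just that packing; the DEMO_ queue-size constants are assumptions for illustration, since the real values come from the driver headers.

#include <stdio.h>
#include <stdint.h>

/* Assumed for illustration -- the real values live in the driver headers. */
#define DEMO_TFD_QUEUE_SIZE_MAX		256
#define DEMO_TFD_QUEUE_SIZE_BC_DUP	64

/* One byte-count entry: 12 bits of length, 4 bits of station id. */
static uint16_t demo_bc_entry(uint16_t len, uint8_t sta_id)
{
	return (len & 0xFFF) | ((uint16_t)sta_id << 12);
}

int main(void)
{
	uint16_t tfd_offset[DEMO_TFD_QUEUE_SIZE_MAX + DEMO_TFD_QUEUE_SIZE_BC_DUP];
	int write_ptr = 5;
	uint16_t ent = demo_bc_entry(1200, 3);

	tfd_offset[write_ptr] = ent;
	/* the first BC_DUP entries are duplicated past the end of the table */
	if (write_ptr < DEMO_TFD_QUEUE_SIZE_BC_DUP)
		tfd_offset[DEMO_TFD_QUEUE_SIZE_MAX + write_ptr] = ent;

	printf("entry 0x%04x at %d (mirrored at %d)\n",
	       ent, write_ptr, DEMO_TFD_QUEUE_SIZE_MAX + write_ptr);
	return 0;
}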
*/ for (i = 1; i < num_tbs; i++) - pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i), + dma_unmap_single(priv->bus->dev, iwl_tfd_tb_get_addr(tfd, i), iwl_tfd_tb_get_len(tfd, i), dma_dir); } @@ -158,28 +197,29 @@ static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta, * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] * @priv - driver private data * @txq - tx queue + * @index - the index of the TFD to be freed * * Does NOT advance any TFD circular buffer read/write indexes * Does NOT free the TFD itself (which is within circular buffer) */ -void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq) +void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq, + int index) { struct iwl_tfd *tfd_tmp = txq->tfds; - int index = txq->q.read_ptr; iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index], - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); /* free SKB */ if (txq->txb) { struct sk_buff *skb; - skb = txq->txb[txq->q.read_ptr].skb; + skb = txq->txb[index].skb; /* can be called from irqs-disabled context */ if (skb) { dev_kfree_skb_any(skb); - txq->txb[txq->q.read_ptr].skb = NULL; + txq->txb[index].skb = NULL; } } } @@ -221,140 +261,6 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv, return 0; } -/* - * Tell nic where to find circular buffer of Tx Frame Descriptors for - * given Tx queue, and enable the DMA channel used for that queue. - * - * supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA - * channels supported in hardware. - */ -static int iwlagn_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq) -{ - int txq_id = txq->q.id; - - /* Circular buffer (TFD queue in DRAM) physical base address */ - iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id), - txq->q.dma_addr >> 8); - - return 0; -} - -/** - * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's - */ -void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id) -{ - struct iwl_tx_queue *txq = &priv->txq[txq_id]; - struct iwl_queue *q = &txq->q; - - if (q->n_bd == 0) - return; - - while (q->write_ptr != q->read_ptr) { - iwlagn_txq_free_tfd(priv, txq); - q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); - } -} - -/** - * iwl_tx_queue_free - Deallocate DMA queue. - * @txq: Transmit queue to deallocate. - * - * Empty queue by removing and destroying all BD's. - * Free all buffers. - * 0-fill, but do not free "txq" descriptor structure. 
- */ -void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id) -{ - struct iwl_tx_queue *txq = &priv->txq[txq_id]; - struct device *dev = &priv->pci_dev->dev; - int i; - - iwl_tx_queue_unmap(priv, txq_id); - - /* De-alloc array of command/tx buffers */ - for (i = 0; i < TFD_TX_CMD_SLOTS; i++) - kfree(txq->cmd[i]); - - /* De-alloc circular buffer of TFDs */ - if (txq->q.n_bd) - dma_free_coherent(dev, priv->hw_params.tfd_size * - txq->q.n_bd, txq->tfds, txq->q.dma_addr); - - /* De-alloc array of per-TFD driver data */ - kfree(txq->txb); - txq->txb = NULL; - - /* deallocate arrays */ - kfree(txq->cmd); - kfree(txq->meta); - txq->cmd = NULL; - txq->meta = NULL; - - /* 0-fill queue descriptor structure */ - memset(txq, 0, sizeof(*txq)); -} - -/** - * iwl_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue - */ -void iwl_cmd_queue_unmap(struct iwl_priv *priv) -{ - struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; - struct iwl_queue *q = &txq->q; - int i; - - if (q->n_bd == 0) - return; - - while (q->read_ptr != q->write_ptr) { - i = get_cmd_index(q, q->read_ptr); - - if (txq->meta[i].flags & CMD_MAPPED) { - iwlagn_unmap_tfd(priv, &txq->meta[i], &txq->tfds[i], - PCI_DMA_BIDIRECTIONAL); - txq->meta[i].flags = 0; - } - - q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); - } -} - -/** - * iwl_cmd_queue_free - Deallocate DMA queue. - * @txq: Transmit queue to deallocate. - * - * Empty queue by removing and destroying all BD's. - * Free all buffers. - * 0-fill, but do not free "txq" descriptor structure. - */ -void iwl_cmd_queue_free(struct iwl_priv *priv) -{ - struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; - struct device *dev = &priv->pci_dev->dev; - int i; - - iwl_cmd_queue_unmap(priv); - - /* De-alloc array of command/tx buffers */ - for (i = 0; i < TFD_CMD_SLOTS; i++) - kfree(txq->cmd[i]); - - /* De-alloc circular buffer of TFDs */ - if (txq->q.n_bd) - dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd, - txq->tfds, txq->q.dma_addr); - - /* deallocate arrays */ - kfree(txq->cmd); - kfree(txq->meta); - txq->cmd = NULL; - txq->meta = NULL; - - /* 0-fill queue descriptor structure */ - memset(txq, 0, sizeof(*txq)); -} - /*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** * DMA services * @@ -393,11 +299,10 @@ int iwl_queue_space(const struct iwl_queue *q) return s; } - /** * iwl_queue_init - Initialize queue's high/low-water and read/write indexes */ -static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q, +int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q, int count, int slots_num, u32 id) { q->n_bd = count; @@ -427,122 +332,185 @@ static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q, return 0; } -/** - * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue - */ -static int iwl_tx_queue_alloc(struct iwl_priv *priv, - struct iwl_tx_queue *txq, u32 id) +/*TODO: this functions should NOT be exported from trans module - export it + * until the reclaim flow will be brought to the transport module too. + * Add a declaration to make sparse happy */ +void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv, + struct iwl_tx_queue *txq); + +void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv, + struct iwl_tx_queue *txq) { - struct device *dev = &priv->pci_dev->dev; - size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX; - - /* Driver private data, only for Tx (not command) queues, - * not shared with device. 
*/ - if (id != priv->cmd_queue) { - txq->txb = kzalloc(sizeof(txq->txb[0]) * - TFD_QUEUE_SIZE_MAX, GFP_KERNEL); - if (!txq->txb) { - IWL_ERR(priv, "kmalloc for auxiliary BD " - "structures failed\n"); - goto error; - } - } else { - txq->txb = NULL; - } + struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr; + int txq_id = txq->q.id; + int read_ptr = txq->q.read_ptr; + u8 sta_id = 0; + __le16 bc_ent; - /* Circular buffer of transmit frame descriptors (TFDs), - * shared with device */ - txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, - GFP_KERNEL); - if (!txq->tfds) { - IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz); - goto error; - } - txq->q.id = id; + WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); + + if (txq_id != priv->cmd_queue) + sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id; + + bc_ent = cpu_to_le16(1 | (sta_id << 12)); + scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; + + if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) + scd_bc_tbl[txq_id]. + tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; +} + +static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid, + u16 txq_id) +{ + u32 tbl_dw_addr; + u32 tbl_dw; + u16 scd_q2ratid; + + scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK; + + tbl_dw_addr = priv->scd_base_addr + + SCD_TRANS_TBL_OFFSET_QUEUE(txq_id); + + tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr); + + if (txq_id & 0x1) + tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); + else + tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); + + iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw); return 0; +} + +static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id) +{ + /* Simply stop the queue, but don't change any configuration; + * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */ + iwl_write_prph(priv, + SCD_QUEUE_STATUS_BITS(txq_id), + (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)| + (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); +} + +void iwl_trans_set_wr_ptrs(struct iwl_priv *priv, + int txq_id, u32 index) +{ + iwl_write_direct32(priv, HBUS_TARG_WRPTR, + (index & 0xff) | (txq_id << 8)); + iwl_write_prph(priv, SCD_QUEUE_RDPTR(txq_id), index); +} + +void iwl_trans_tx_queue_set_status(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + int tx_fifo_id, int scd_retry) +{ + int txq_id = txq->q.id; + int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0; - error: - kfree(txq->txb); - txq->txb = NULL; + iwl_write_prph(priv, SCD_QUEUE_STATUS_BITS(txq_id), + (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) | + (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) | + (1 << SCD_QUEUE_STTS_REG_POS_WSL) | + SCD_QUEUE_STTS_REG_MSK); - return -ENOMEM; + txq->sched_retry = scd_retry; + + IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n", + active ? "Activate" : "Deactivate", + scd_retry ? 
"BA" : "AC/CMD", txq_id, tx_fifo_id); } -/** - * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue - */ -int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, - int slots_num, u32 txq_id) +void iwl_trans_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid, + int frame_limit) { - int i, len; - int ret; + int tx_fifo, txq_id, ssn_idx; + u16 ra_tid; + unsigned long flags; + struct iwl_tid_data *tid_data; - txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * slots_num, - GFP_KERNEL); - txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * slots_num, - GFP_KERNEL); + if (WARN_ON(sta_id == IWL_INVALID_STATION)) + return; + if (WARN_ON(tid >= MAX_TID_COUNT)) + return; - if (!txq->meta || !txq->cmd) - goto out_free_arrays; + spin_lock_irqsave(&priv->sta_lock, flags); + tid_data = &priv->stations[sta_id].tid[tid]; + ssn_idx = SEQ_TO_SN(tid_data->seq_number); + txq_id = tid_data->agg.txq_id; + tx_fifo = tid_data->agg.tx_fifo; + spin_unlock_irqrestore(&priv->sta_lock, flags); - len = sizeof(struct iwl_device_cmd); - for (i = 0; i < slots_num; i++) { - txq->cmd[i] = kmalloc(len, GFP_KERNEL); - if (!txq->cmd[i]) - goto err; - } + ra_tid = BUILD_RAxTID(sta_id, tid); - /* Alloc driver data array and TFD circular buffer */ - ret = iwl_tx_queue_alloc(priv, txq, txq_id); - if (ret) - goto err; + spin_lock_irqsave(&priv->lock, flags); - txq->need_update = 0; + /* Stop this Tx queue before configuring it */ + iwlagn_tx_queue_stop_scheduler(priv, txq_id); - /* - * For the default queues 0-3, set up the swq_id - * already -- all others need to get one later - * (if they need one at all). - */ - if (txq_id < 4) - iwl_set_swq_id(txq, txq_id, txq_id); + /* Map receiver-address / traffic-ID to this queue */ + iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id); - /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise - * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ - BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); + /* Set this queue as a chain-building queue */ + iwl_set_bits_prph(priv, SCD_QUEUECHAIN_SEL, (1<<txq_id)); - /* Initialize queue's high/low-water marks, and head/tail indexes */ - ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id); - if (ret) - return ret; + /* enable aggregations for the queue */ + iwl_set_bits_prph(priv, SCD_AGGR_SEL, (1<<txq_id)); - /* Tell device where to find queue */ - iwlagn_tx_queue_init(priv, txq); + /* Place first TFD at index corresponding to start sequence number. 
+ * Assumes that ssn_idx is valid (!= 0xFFF) */ + priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); + priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); + iwl_trans_set_wr_ptrs(priv, txq_id, ssn_idx); - return 0; -err: - for (i = 0; i < slots_num; i++) - kfree(txq->cmd[i]); -out_free_arrays: - kfree(txq->meta); - kfree(txq->cmd); - - return -ENOMEM; + /* Set up Tx window size and frame limit for this queue */ + iwl_write_targ_mem(priv, priv->scd_base_addr + + SCD_CONTEXT_QUEUE_OFFSET(txq_id) + + sizeof(u32), + ((frame_limit << + SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & + SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | + ((frame_limit << + SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & + SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); + + iwl_set_bits_prph(priv, SCD_INTERRUPT_MASK, (1 << txq_id)); + + /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ + iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1); + + spin_unlock_irqrestore(&priv->lock, flags); } -void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, - int slots_num, u32 txq_id) +int iwl_trans_txq_agg_disable(struct iwl_priv *priv, u16 txq_id, + u16 ssn_idx, u8 tx_fifo) { - memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * slots_num); + if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) || + (IWLAGN_FIRST_AMPDU_QUEUE + + priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) { + IWL_ERR(priv, + "queue number out of range: %d, must be %d to %d\n", + txq_id, IWLAGN_FIRST_AMPDU_QUEUE, + IWLAGN_FIRST_AMPDU_QUEUE + + priv->cfg->base_params->num_of_ampdu_queues - 1); + return -EINVAL; + } - txq->need_update = 0; + iwlagn_tx_queue_stop_scheduler(priv, txq_id); + + iwl_clear_bits_prph(priv, SCD_AGGR_SEL, (1 << txq_id)); - /* Initialize queue's high/low-water marks, and head/tail indexes */ - iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id); + priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); + priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); + /* supposes that ssn_idx is valid (!= 0xFFF) */ + iwl_trans_set_wr_ptrs(priv, txq_id, ssn_idx); - /* Tell device where to find queue */ - iwlagn_tx_queue_init(priv, txq); + iwl_clear_bits_prph(priv, SCD_INTERRUPT_MASK, (1 << txq_id)); + iwl_txq_ctx_deactivate(priv, txq_id); + iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0); + + return 0; } /*************** HOST COMMAND QUEUE FUNCTIONS *****/ @@ -556,7 +524,7 @@ void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, * failed. On success, it turns the index (> 0) of command in the * command queue. 
*/ -int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) { struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; struct iwl_queue *q = &txq->q; @@ -581,6 +549,12 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) return -EIO; } + if ((priv->ucode_owner == IWL_OWNERSHIP_TM) && + !(cmd->flags & CMD_ON_DEMAND)) { + IWL_DEBUG_HC(priv, "tm own the uCode, no regular hcmd send\n"); + return -EIO; + } + copy_size = sizeof(out_cmd->hdr); cmd_size = sizeof(out_cmd->hdr); @@ -634,11 +608,6 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) out_cmd = txq->cmd[idx]; out_meta = &txq->meta[idx]; - if (WARN_ON(out_meta->flags & CMD_MAPPED)) { - spin_unlock_irqrestore(&priv->hcmd_lock, flags); - return -ENOSPC; - } - memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ if (cmd->flags & CMD_WANT_SKB) out_meta->source = cmd; @@ -671,9 +640,9 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) le16_to_cpu(out_cmd->hdr.sequence), cmd_size, q->write_ptr, idx, priv->cmd_queue); - phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr, - copy_size, PCI_DMA_BIDIRECTIONAL); - if (unlikely(pci_dma_mapping_error(priv->pci_dev, phys_addr))) { + phys_addr = dma_map_single(priv->bus->dev, &out_cmd->hdr, copy_size, + DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) { idx = -ENOMEM; goto out; } @@ -693,12 +662,12 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) continue; if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)) continue; - phys_addr = pci_map_single(priv->pci_dev, (void *)cmd->data[i], - cmd->len[i], PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(priv->pci_dev, phys_addr)) { + phys_addr = dma_map_single(priv->bus->dev, (void *)cmd->data[i], + cmd->len[i], DMA_BIDIRECTIONAL); + if (dma_mapping_error(priv->bus->dev, phys_addr)) { iwlagn_unmap_tfd(priv, out_meta, &txq->tfds[q->write_ptr], - PCI_DMA_BIDIRECTIONAL); + DMA_BIDIRECTIONAL); idx = -ENOMEM; goto out; } @@ -712,7 +681,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) #endif } - out_meta->flags = cmd->flags | CMD_MAPPED; + out_meta->flags = cmd->flags; txq->need_update = 1; @@ -748,9 +717,9 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx) int nfreed = 0; if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) { - IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, " - "is out of range [0-%d] %d %d.\n", txq_id, - idx, q->n_bd, q->write_ptr, q->read_ptr); + IWL_ERR(priv, "%s: Read index for DMA queue txq id (%d), " + "index %d is out of range [0-%d] %d %d.\n", __func__, + txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr); return; } @@ -802,7 +771,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) cmd = txq->cmd[cmd_index]; meta = &txq->meta[cmd_index]; - iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], PCI_DMA_BIDIRECTIONAL); + iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL); /* Input error checking is done when commands are added to queue. 
*/ if (meta->flags & CMD_WANT_SKB) { @@ -822,8 +791,246 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) wake_up_interruptible(&priv->wait_command_queue); } - /* Mark as unmapped */ meta->flags = 0; spin_unlock_irqrestore(&priv->hcmd_lock, flags); } + +const char *get_cmd_string(u8 cmd) +{ + switch (cmd) { + IWL_CMD(REPLY_ALIVE); + IWL_CMD(REPLY_ERROR); + IWL_CMD(REPLY_RXON); + IWL_CMD(REPLY_RXON_ASSOC); + IWL_CMD(REPLY_QOS_PARAM); + IWL_CMD(REPLY_RXON_TIMING); + IWL_CMD(REPLY_ADD_STA); + IWL_CMD(REPLY_REMOVE_STA); + IWL_CMD(REPLY_REMOVE_ALL_STA); + IWL_CMD(REPLY_TXFIFO_FLUSH); + IWL_CMD(REPLY_WEPKEY); + IWL_CMD(REPLY_TX); + IWL_CMD(REPLY_LEDS_CMD); + IWL_CMD(REPLY_TX_LINK_QUALITY_CMD); + IWL_CMD(COEX_PRIORITY_TABLE_CMD); + IWL_CMD(COEX_MEDIUM_NOTIFICATION); + IWL_CMD(COEX_EVENT_CMD); + IWL_CMD(REPLY_QUIET_CMD); + IWL_CMD(REPLY_CHANNEL_SWITCH); + IWL_CMD(CHANNEL_SWITCH_NOTIFICATION); + IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD); + IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION); + IWL_CMD(POWER_TABLE_CMD); + IWL_CMD(PM_SLEEP_NOTIFICATION); + IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC); + IWL_CMD(REPLY_SCAN_CMD); + IWL_CMD(REPLY_SCAN_ABORT_CMD); + IWL_CMD(SCAN_START_NOTIFICATION); + IWL_CMD(SCAN_RESULTS_NOTIFICATION); + IWL_CMD(SCAN_COMPLETE_NOTIFICATION); + IWL_CMD(BEACON_NOTIFICATION); + IWL_CMD(REPLY_TX_BEACON); + IWL_CMD(WHO_IS_AWAKE_NOTIFICATION); + IWL_CMD(QUIET_NOTIFICATION); + IWL_CMD(REPLY_TX_PWR_TABLE_CMD); + IWL_CMD(MEASURE_ABORT_NOTIFICATION); + IWL_CMD(REPLY_BT_CONFIG); + IWL_CMD(REPLY_STATISTICS_CMD); + IWL_CMD(STATISTICS_NOTIFICATION); + IWL_CMD(REPLY_CARD_STATE_CMD); + IWL_CMD(CARD_STATE_NOTIFICATION); + IWL_CMD(MISSED_BEACONS_NOTIFICATION); + IWL_CMD(REPLY_CT_KILL_CONFIG_CMD); + IWL_CMD(SENSITIVITY_CMD); + IWL_CMD(REPLY_PHY_CALIBRATION_CMD); + IWL_CMD(REPLY_RX_PHY_CMD); + IWL_CMD(REPLY_RX_MPDU_CMD); + IWL_CMD(REPLY_RX); + IWL_CMD(REPLY_COMPRESSED_BA); + IWL_CMD(CALIBRATION_CFG_CMD); + IWL_CMD(CALIBRATION_RES_NOTIFICATION); + IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION); + IWL_CMD(REPLY_TX_POWER_DBM_CMD); + IWL_CMD(TEMPERATURE_NOTIFICATION); + IWL_CMD(TX_ANT_CONFIGURATION_CMD); + IWL_CMD(REPLY_BT_COEX_PROFILE_NOTIF); + IWL_CMD(REPLY_BT_COEX_PRIO_TABLE); + IWL_CMD(REPLY_BT_COEX_PROT_ENV); + IWL_CMD(REPLY_WIPAN_PARAMS); + IWL_CMD(REPLY_WIPAN_RXON); + IWL_CMD(REPLY_WIPAN_RXON_TIMING); + IWL_CMD(REPLY_WIPAN_RXON_ASSOC); + IWL_CMD(REPLY_WIPAN_QOS_PARAM); + IWL_CMD(REPLY_WIPAN_WEPKEY); + IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH); + IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION); + IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE); + IWL_CMD(REPLY_WOWLAN_PATTERNS); + IWL_CMD(REPLY_WOWLAN_WAKEUP_FILTER); + IWL_CMD(REPLY_WOWLAN_TSC_RSC_PARAMS); + IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS); + IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL); + IWL_CMD(REPLY_WOWLAN_GET_STATUS); + default: + return "UNKNOWN"; + + } +} + +#define HOST_COMPLETE_TIMEOUT (2 * HZ) + +static void iwl_generic_cmd_callback(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct iwl_rx_packet *pkt) +{ + if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERR(priv, "Bad return from %s (0x%08X)\n", + get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); + return; + } + +#ifdef CONFIG_IWLWIFI_DEBUG + switch (cmd->hdr.cmd) { + case REPLY_TX_LINK_QUALITY_CMD: + case SENSITIVITY_CMD: + IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n", + get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); + break; + default: + IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n", + get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); + } +#endif +} + +static int 
iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + int ret; + + /* An asynchronous command can not expect an SKB to be set. */ + if (WARN_ON(cmd->flags & CMD_WANT_SKB)) + return -EINVAL; + + /* Assign a generic callback if one is not provided */ + if (!cmd->callback) + cmd->callback = iwl_generic_cmd_callback; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return -EBUSY; + + ret = iwl_enqueue_hcmd(priv, cmd); + if (ret < 0) { + IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n", + get_cmd_string(cmd->id), ret); + return ret; + } + return 0; +} + +static int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + int cmd_idx; + int ret; + + lockdep_assert_held(&priv->mutex); + + /* A synchronous command can not have a callback set. */ + if (WARN_ON(cmd->callback)) + return -EINVAL; + + IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n", + get_cmd_string(cmd->id)); + + set_bit(STATUS_HCMD_ACTIVE, &priv->status); + IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n", + get_cmd_string(cmd->id)); + + cmd_idx = iwl_enqueue_hcmd(priv, cmd); + if (cmd_idx < 0) { + ret = cmd_idx; + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n", + get_cmd_string(cmd->id), ret); + return ret; + } + + ret = wait_event_interruptible_timeout(priv->wait_command_queue, + !test_bit(STATUS_HCMD_ACTIVE, &priv->status), + HOST_COMPLETE_TIMEOUT); + if (!ret) { + if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) { + IWL_ERR(priv, + "Error sending %s: time out after %dms.\n", + get_cmd_string(cmd->id), + jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); + + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command" + "%s\n", get_cmd_string(cmd->id)); + ret = -ETIMEDOUT; + goto cancel; + } + } + + if (test_bit(STATUS_RF_KILL_HW, &priv->status)) { + IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n", + get_cmd_string(cmd->id)); + ret = -ECANCELED; + goto fail; + } + if (test_bit(STATUS_FW_ERROR, &priv->status)) { + IWL_ERR(priv, "Command %s failed: FW Error\n", + get_cmd_string(cmd->id)); + ret = -EIO; + goto fail; + } + if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) { + IWL_ERR(priv, "Error: Response NULL in '%s'\n", + get_cmd_string(cmd->id)); + ret = -EIO; + goto cancel; + } + + return 0; + +cancel: + if (cmd->flags & CMD_WANT_SKB) { + /* + * Cancel the CMD_WANT_SKB flag for the cmd in the + * TX cmd queue. Otherwise in case the cmd comes + * in later, it will possibly set an invalid + * address (cmd->meta.source). 
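On a side note, the get_cmd_string() lookup added earlier in this file relies on a small stringification trick: each IWL_CMD() line expands to a case label that returns the opcode's own name. A minimal standalone version of the same pattern, with invented demo_ opcodes, looks like this:

#include <stdio.h>

/* same idea as the driver's IWL_CMD(): the case label and the returned
 * string are generated from a single token */
#define DEMO_CMD(x) case x: return #x

enum demo_cmd_id {
	DEMO_REPLY_ALIVE = 0x01,
	DEMO_REPLY_ERROR = 0x02,
};

static const char *demo_cmd_string(int cmd)
{
	switch (cmd) {
	DEMO_CMD(DEMO_REPLY_ALIVE);
	DEMO_CMD(DEMO_REPLY_ERROR);
	default:
		return "UNKNOWN";
	}
}

int main(void)
{
	printf("0x01 -> %s\n", demo_cmd_string(0x01));
	printf("0xff -> %s\n", demo_cmd_string(0xff));
	return 0;
}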
+ */ + priv->txq[priv->cmd_queue].meta[cmd_idx].flags &= + ~CMD_WANT_SKB; + } +fail: + if (cmd->reply_page) { + iwl_free_pages(priv, cmd->reply_page); + cmd->reply_page = 0; + } + + return ret; +} + +int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + if (cmd->flags & CMD_ASYNC) + return iwl_send_cmd_async(priv, cmd); + + return iwl_send_cmd_sync(priv, cmd); +} + +int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u32 flags, u16 len, + const void *data) +{ + struct iwl_host_cmd cmd = { + .id = id, + .len = { len, }, + .data = { data, }, + .flags = flags, + }; + + return iwl_send_cmd(priv, &cmd); +} diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.c b/drivers/net/wireless/iwlwifi/iwl-trans.c new file mode 100644 index 00000000000..41f0de91400 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-trans.c @@ -0,0 +1,1172 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include "iwl-dev.h" +#include "iwl-trans.h" +#include "iwl-core.h" +#include "iwl-helpers.h" +#include "iwl-trans-int-pcie.h" +/* TODO: remove unneeded includes once the transport layer tx_free is here */ +#include "iwl-agn.h" +#include "iwl-core.h" + +static int iwl_trans_rx_alloc(struct iwl_priv *priv) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + struct device *dev = priv->bus->dev; + + memset(&priv->rxq, 0, sizeof(priv->rxq)); + + spin_lock_init(&rxq->lock); + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + + if (WARN_ON(rxq->bd || rxq->rb_stts)) + return -EINVAL; + + /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */ + rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, + &rxq->bd_dma, GFP_KERNEL); + if (!rxq->bd) + goto err_bd; + memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE); + + /* Allocate the driver's pointer to receive buffer status */ + rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts), + &rxq->rb_stts_dma, GFP_KERNEL); + if (!rxq->rb_stts) + goto err_rb_stts; + memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); + + return 0; + +err_rb_stts: + dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, + rxq->bd, rxq->bd_dma); + memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma)); + rxq->bd = NULL; +err_bd: + return -ENOMEM; +} + +static void iwl_trans_rxq_free_rx_bufs(struct iwl_priv *priv) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + int i; + + /* Fill the rx_used queue with _all_ of the Rx buffers */ + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { + /* In the reset function, these buffers may have been allocated + * to an SKB, so we need to unmap and free potential storage */ + if (rxq->pool[i].page != NULL) { + dma_unmap_page(priv->bus->dev, rxq->pool[i].page_dma, + PAGE_SIZE << priv->hw_params.rx_page_order, + DMA_FROM_DEVICE); + __iwl_free_pages(priv, rxq->pool[i].page); + rxq->pool[i].page = NULL; + } + list_add_tail(&rxq->pool[i].list, &rxq->rx_used); + } +} + +static void iwl_trans_rx_hw_init(struct iwl_priv *priv, + struct iwl_rx_queue *rxq) +{ + u32 rb_size; + const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ + u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices?
*/ + + rb_timeout = RX_RB_TIMEOUT; + + if (iwlagn_mod_params.amsdu_size_8K) + rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; + else + rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; + + /* Stop Rx DMA */ + iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); + + /* Reset driver's Rx queue write index */ + iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); + + /* Tell device where to find RBD circular buffer in DRAM */ + iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG, + (u32)(rxq->bd_dma >> 8)); + + /* Tell device where in DRAM to update its Rx status */ + iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG, + rxq->rb_stts_dma >> 4); + + /* Enable Rx DMA + * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in + * the credit mechanism in 5000 HW RX FIFO + * Direct rx interrupts to hosts + * Rx buffer size 4 or 8k + * RB timeout 0x10 + * 256 RBDs + */ + iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, + FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | + FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | + FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | + FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK | + rb_size| + (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)| + (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); + + /* Set interrupt coalescing timer to default (2048 usecs) */ + iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); +} + +static int iwl_rx_init(struct iwl_priv *priv) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + int i, err; + unsigned long flags; + + if (!rxq->bd) { + err = iwl_trans_rx_alloc(priv); + if (err) + return err; + } + + spin_lock_irqsave(&rxq->lock, flags); + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + + iwl_trans_rxq_free_rx_bufs(priv); + + for (i = 0; i < RX_QUEUE_SIZE; i++) + rxq->queue[i] = NULL; + + /* Set us so that we have processed and used all buffers, but have + * not restocked the Rx queue with fresh buffers */ + rxq->read = rxq->write = 0; + rxq->write_actual = 0; + rxq->free_count = 0; + spin_unlock_irqrestore(&rxq->lock, flags); + + iwlagn_rx_replenish(priv); + + iwl_trans_rx_hw_init(priv, rxq); + + spin_lock_irqsave(&priv->lock, flags); + rxq->need_update = 1; + iwl_rx_queue_update_write_ptr(priv, rxq); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static void iwl_trans_rx_free(struct iwl_priv *priv) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + unsigned long flags; + + /*if rxq->bd is NULL, it means that nothing has been allocated, + * exit now */ + if (!rxq->bd) { + IWL_DEBUG_INFO(priv, "Free NULL rx context\n"); + return; + } + + spin_lock_irqsave(&rxq->lock, flags); + iwl_trans_rxq_free_rx_bufs(priv); + spin_unlock_irqrestore(&rxq->lock, flags); + + dma_free_coherent(priv->bus->dev, sizeof(__le32) * RX_QUEUE_SIZE, + rxq->bd, rxq->bd_dma); + memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma)); + rxq->bd = NULL; + + if (rxq->rb_stts) + dma_free_coherent(priv->bus->dev, + sizeof(struct iwl_rb_status), + rxq->rb_stts, rxq->rb_stts_dma); + else + IWL_DEBUG_INFO(priv, "Free rxq->rb_stts which is NULL\n"); + memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma)); + rxq->rb_stts = NULL; +} + +static int iwl_trans_rx_stop(struct iwl_priv *priv) +{ + + /* stop Rx DMA */ + iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); + return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG, + FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); +} + +static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv, + struct iwl_dma_ptr *ptr, size_t size) +{ + if (WARN_ON(ptr->addr)) + return -EINVAL; + + ptr->addr 
= dma_alloc_coherent(priv->bus->dev, size, + &ptr->dma, GFP_KERNEL); + if (!ptr->addr) + return -ENOMEM; + ptr->size = size; + return 0; +} + +static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv, + struct iwl_dma_ptr *ptr) +{ + if (unlikely(!ptr->addr)) + return; + + dma_free_coherent(priv->bus->dev, ptr->size, ptr->addr, ptr->dma); + memset(ptr, 0, sizeof(*ptr)); +} + +static int iwl_trans_txq_alloc(struct iwl_priv *priv, struct iwl_tx_queue *txq, + int slots_num, u32 txq_id) +{ + size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX; + int i; + + if (WARN_ON(txq->meta || txq->cmd || txq->txb || txq->tfds)) + return -EINVAL; + + txq->q.n_window = slots_num; + + txq->meta = kzalloc(sizeof(txq->meta[0]) * slots_num, + GFP_KERNEL); + txq->cmd = kzalloc(sizeof(txq->cmd[0]) * slots_num, + GFP_KERNEL); + + if (!txq->meta || !txq->cmd) + goto error; + + for (i = 0; i < slots_num; i++) { + txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd), + GFP_KERNEL); + if (!txq->cmd[i]) + goto error; + } + + /* Alloc driver data array and TFD circular buffer */ + /* Driver private data, only for Tx (not command) queues, + * not shared with device. */ + if (txq_id != priv->cmd_queue) { + txq->txb = kzalloc(sizeof(txq->txb[0]) * + TFD_QUEUE_SIZE_MAX, GFP_KERNEL); + if (!txq->txb) { + IWL_ERR(priv, "kmalloc for auxiliary BD " + "structures failed\n"); + goto error; + } + } else { + txq->txb = NULL; + } + + /* Circular buffer of transmit frame descriptors (TFDs), + * shared with device */ + txq->tfds = dma_alloc_coherent(priv->bus->dev, tfd_sz, &txq->q.dma_addr, + GFP_KERNEL); + if (!txq->tfds) { + IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz); + goto error; + } + txq->q.id = txq_id; + + return 0; +error: + kfree(txq->txb); + txq->txb = NULL; + /* since txq->cmd has been zeroed, + * all non allocated cmd[i] will be NULL */ + if (txq->cmd) + for (i = 0; i < slots_num; i++) + kfree(txq->cmd[i]); + kfree(txq->meta); + kfree(txq->cmd); + txq->meta = NULL; + txq->cmd = NULL; + + return -ENOMEM; + +} + +static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, + int slots_num, u32 txq_id) +{ + int ret; + + txq->need_update = 0; + memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num); + + /* + * For the default queues 0-3, set up the swq_id + * already -- all others need to get one later + * (if they need one at all). + */ + if (txq_id < 4) + iwl_set_swq_id(txq, txq_id, txq_id); + + /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise + * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ + BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); + + /* Initialize queue's high/low-water marks, and head/tail indexes */ + ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, + txq_id); + if (ret) + return ret; + + /* + * Tell nic where to find circular buffer of Tx Frame Descriptors for + * given Tx queue, and enable the DMA channel used for that queue. 
+ * Circular buffer (TFD queue in DRAM) physical base address */ + iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id), + txq->q.dma_addr >> 8); + + return 0; +} + +/** + * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's + */ +static void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id) +{ + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct iwl_queue *q = &txq->q; + + if (!q->n_bd) + return; + + while (q->write_ptr != q->read_ptr) { + /* The read_ptr needs to be bounded by q->n_window */ + iwlagn_txq_free_tfd(priv, txq, get_cmd_index(q, q->read_ptr)); + q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); + } +} + +/** + * iwl_tx_queue_free - Deallocate DMA queue. + * @txq: Transmit queue to deallocate. + * + * Empty queue by removing and destroying all BD's. + * Free all buffers. + * 0-fill, but do not free "txq" descriptor structure. + */ +static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id) +{ + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct device *dev = priv->bus->dev; + int i; + if (WARN_ON(!txq)) + return; + + iwl_tx_queue_unmap(priv, txq_id); + + /* De-alloc array of command/tx buffers */ + for (i = 0; i < txq->q.n_window; i++) + kfree(txq->cmd[i]); + + /* De-alloc circular buffer of TFDs */ + if (txq->q.n_bd) { + dma_free_coherent(dev, priv->hw_params.tfd_size * + txq->q.n_bd, txq->tfds, txq->q.dma_addr); + memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr)); + } + + /* De-alloc array of per-TFD driver data */ + kfree(txq->txb); + txq->txb = NULL; + + /* deallocate arrays */ + kfree(txq->cmd); + kfree(txq->meta); + txq->cmd = NULL; + txq->meta = NULL; + + /* 0-fill queue descriptor structure */ + memset(txq, 0, sizeof(*txq)); +} + +/** + * iwl_trans_tx_free - Free TXQ Context + * + * Destroy all TX DMA queues and structures + */ +static void iwl_trans_tx_free(struct iwl_priv *priv) +{ + int txq_id; + + /* Tx queues */ + if (priv->txq) { + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) + iwl_tx_queue_free(priv, txq_id); + } + + kfree(priv->txq); + priv->txq = NULL; + + iwlagn_free_dma_ptr(priv, &priv->kw); + + iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls); +} + +/** + * iwl_trans_tx_alloc - allocate TX context + * Allocate all Tx DMA structures and initialize them + * + * @param priv + * @return error code + */ +static int iwl_trans_tx_alloc(struct iwl_priv *priv) +{ + int ret; + int txq_id, slots_num; + + /* It is not allowed to alloc twice, so warn when this happens. + * We cannot rely on the previous allocation, so free and fail */ + if (WARN_ON(priv->txq)) { + ret = -EINVAL; + goto error; + } + + ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls, + priv->hw_params.scd_bc_tbls_size); + if (ret) { + IWL_ERR(priv, "Scheduler BC Table allocation failed\n"); + goto error; + } + + /* Alloc keep-warm buffer */ + ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE); + if (ret) { + IWL_ERR(priv, "Keep Warm allocation failed\n"); + goto error; + } + + priv->txq = kzalloc(sizeof(struct iwl_tx_queue) * + priv->cfg->base_params->num_of_queues, GFP_KERNEL); + if (!priv->txq) { + IWL_ERR(priv, "Not enough memory for txq\n"); + ret = -ENOMEM; + goto error; + } + + /* Alloc and init all Tx queues, including the command queue (#4/#9) */ + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { + slots_num = (txq_id == priv->cmd_queue) ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; + ret = iwl_trans_txq_alloc(priv, &priv->txq[txq_id], slots_num, + txq_id); + if (ret) { + IWL_ERR(priv, "Tx %d queue alloc failed\n", txq_id); + goto error; + } + } + + return 0; + +error: + trans_tx_free(&priv->trans); + + return ret; +} +static int iwl_tx_init(struct iwl_priv *priv) +{ + int ret; + int txq_id, slots_num; + unsigned long flags; + bool alloc = false; + + if (!priv->txq) { + ret = iwl_trans_tx_alloc(priv); + if (ret) + goto error; + alloc = true; + } + + spin_lock_irqsave(&priv->lock, flags); + + /* Turn off all Tx DMA fifos */ + iwl_write_prph(priv, SCD_TXFACT, 0); + + /* Tell NIC where to find the "keep warm" buffer */ + iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Alloc and init all Tx queues, including the command queue (#4/#9) */ + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { + slots_num = (txq_id == priv->cmd_queue) ? + TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; + ret = iwl_trans_txq_init(priv, &priv->txq[txq_id], slots_num, + txq_id); + if (ret) { + IWL_ERR(priv, "Tx %d queue init failed\n", txq_id); + goto error; + } + } + + return 0; +error: + /*Upon error, free only if we allocated something */ + if (alloc) + trans_tx_free(&priv->trans); + return ret; +} + +static void iwl_set_pwr_vmain(struct iwl_priv *priv) +{ +/* + * (for documentation purposes) + * to set power to V_AUX, do: + + if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) + iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VAUX, + ~APMG_PS_CTRL_MSK_PWR_SRC); + */ + + iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, + ~APMG_PS_CTRL_MSK_PWR_SRC); +} + +static int iwl_nic_init(struct iwl_priv *priv) +{ + unsigned long flags; + + /* nic_init */ + spin_lock_irqsave(&priv->lock, flags); + iwl_apm_init(priv); + + /* Set interrupt coalescing calibration timer to default (512 usecs) */ + iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF); + + spin_unlock_irqrestore(&priv->lock, flags); + + iwl_set_pwr_vmain(priv); + + priv->cfg->lib->nic_config(priv); + + /* Allocate the RX queue, or reset if it is already allocated */ + iwl_rx_init(priv); + + /* Allocate or reset and init all Tx and Command queues */ + if (iwl_tx_init(priv)) + return -ENOMEM; + + if (priv->cfg->base_params->shadow_reg_enable) { + /* enable shadow regs in HW */ + iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL, + 0x800FFFFF); + } + + set_bit(STATUS_INIT, &priv->status); + + return 0; +} + +#define HW_READY_TIMEOUT (50) + +/* Note: returns poll_bit return value, which is >= 0 if success */ +static int iwl_set_hw_ready(struct iwl_priv *priv) +{ + int ret; + + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); + + /* See if we got it */ + ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, + HW_READY_TIMEOUT); + + IWL_DEBUG_INFO(priv, "hardware%s ready\n", ret < 0 ? 
" not" : ""); + return ret; +} + +/* Note: returns standard 0/-ERROR code */ +static int iwl_trans_prepare_card_hw(struct iwl_priv *priv) +{ + int ret; + + IWL_DEBUG_INFO(priv, "iwl_trans_prepare_card_hw enter\n"); + + ret = iwl_set_hw_ready(priv); + if (ret >= 0) + return 0; + + /* If HW is not ready, prepare the conditions to check again */ + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_PREPARE); + + ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, + ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, + CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000); + + if (ret < 0) + return ret; + + /* HW should be ready by now, check again. */ + ret = iwl_set_hw_ready(priv); + if (ret >= 0) + return 0; + return ret; +} + +static int iwl_trans_start_device(struct iwl_priv *priv) +{ + int ret; + + priv->ucode_owner = IWL_OWNERSHIP_DRIVER; + + if ((priv->cfg->sku & EEPROM_SKU_CAP_AMT_ENABLE) && + iwl_trans_prepare_card_hw(priv)) { + IWL_WARN(priv, "Exit HW not ready\n"); + return -EIO; + } + + /* If platform's RF_KILL switch is NOT set to KILL */ + if (iwl_read32(priv, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) + clear_bit(STATUS_RF_KILL_HW, &priv->status); + else + set_bit(STATUS_RF_KILL_HW, &priv->status); + + if (iwl_is_rfkill(priv)) { + wiphy_rfkill_set_hw_state(priv->hw->wiphy, true); + iwl_enable_interrupts(priv); + return -ERFKILL; + } + + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + + ret = iwl_nic_init(priv); + if (ret) { + IWL_ERR(priv, "Unable to init nic\n"); + return ret; + } + + /* make sure rfkill handshake bits are cleared */ + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + /* clear (again), then enable host interrupts */ + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + iwl_enable_interrupts(priv); + + /* really make sure rfkill handshake bits are cleared */ + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + + return 0; +} + +/* + * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask + * must be called under priv->lock and mac access + */ +static void iwl_trans_txq_set_sched(struct iwl_priv *priv, u32 mask) +{ + iwl_write_prph(priv, SCD_TXFACT, mask); +} + +#define IWL_AC_UNSET -1 + +struct queue_to_fifo_ac { + s8 fifo, ac; +}; + +static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = { + { IWL_TX_FIFO_VO, IEEE80211_AC_VO, }, + { IWL_TX_FIFO_VI, IEEE80211_AC_VI, }, + { IWL_TX_FIFO_BE, IEEE80211_AC_BE, }, + { IWL_TX_FIFO_BK, IEEE80211_AC_BK, }, + { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, }, + { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, }, + { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, }, + { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, }, + { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, }, + { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, }, +}; + +static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = { + { IWL_TX_FIFO_VO, IEEE80211_AC_VO, }, + { IWL_TX_FIFO_VI, IEEE80211_AC_VI, }, + { IWL_TX_FIFO_BE, IEEE80211_AC_BE, }, + { IWL_TX_FIFO_BK, IEEE80211_AC_BK, }, + { IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, }, + { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, }, + { IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, }, + { IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, }, + { IWL_TX_FIFO_BE_IPAN, 2, }, + { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, }, +}; +static void iwl_trans_tx_start(struct iwl_priv *priv) +{ + const struct queue_to_fifo_ac *queue_to_fifo; + struct iwl_rxon_context *ctx; + u32 a; + unsigned long flags; + int i, 
chan; + u32 reg_val; + + spin_lock_irqsave(&priv->lock, flags); + + priv->scd_base_addr = iwl_read_prph(priv, SCD_SRAM_BASE_ADDR); + a = priv->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND; + /* reset conext data memory */ + for (; a < priv->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND; + a += 4) + iwl_write_targ_mem(priv, a, 0); + /* reset tx status memory */ + for (; a < priv->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND; + a += 4) + iwl_write_targ_mem(priv, a, 0); + for (; a < priv->scd_base_addr + + SCD_TRANS_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4) + iwl_write_targ_mem(priv, a, 0); + + iwl_write_prph(priv, SCD_DRAM_BASE_ADDR, + priv->scd_bc_tbls.dma >> 10); + + /* Enable DMA channel */ + for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++) + iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan), + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); + + /* Update FH chicken bits */ + reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG); + iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG, + reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); + + iwl_write_prph(priv, SCD_QUEUECHAIN_SEL, + SCD_QUEUECHAIN_SEL_ALL(priv)); + iwl_write_prph(priv, SCD_AGGR_SEL, 0); + + /* initiate the queues */ + for (i = 0; i < priv->hw_params.max_txq_num; i++) { + iwl_write_prph(priv, SCD_QUEUE_RDPTR(i), 0); + iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8)); + iwl_write_targ_mem(priv, priv->scd_base_addr + + SCD_CONTEXT_QUEUE_OFFSET(i), 0); + iwl_write_targ_mem(priv, priv->scd_base_addr + + SCD_CONTEXT_QUEUE_OFFSET(i) + + sizeof(u32), + ((SCD_WIN_SIZE << + SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & + SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | + ((SCD_FRAME_LIMIT << + SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & + SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); + } + + iwl_write_prph(priv, SCD_INTERRUPT_MASK, + IWL_MASK(0, priv->hw_params.max_txq_num)); + + /* Activate all Tx DMA/FIFO channels */ + iwl_trans_txq_set_sched(priv, IWL_MASK(0, 7)); + + /* map queues to FIFOs */ + if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)) + queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo; + else + queue_to_fifo = iwlagn_default_queue_to_tx_fifo; + + iwl_trans_set_wr_ptrs(priv, priv->cmd_queue, 0); + + /* make sure all queue are not stopped */ + memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped)); + for (i = 0; i < 4; i++) + atomic_set(&priv->queue_stop_count[i], 0); + for_each_context(priv, ctx) + ctx->last_tx_rejected = false; + + /* reset to 0 to enable all the queue first */ + priv->txq_ctx_active_msk = 0; + + BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10); + BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 10); + + for (i = 0; i < 10; i++) { + int fifo = queue_to_fifo[i].fifo; + int ac = queue_to_fifo[i].ac; + + iwl_txq_ctx_activate(priv, i); + + if (fifo == IWL_TX_FIFO_UNUSED) + continue; + + if (ac != IWL_AC_UNSET) + iwl_set_swq_id(&priv->txq[i], ac, i); + iwl_trans_tx_queue_set_status(priv, &priv->txq[i], fifo, 0); + } + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Enable L1-Active */ + iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG, + APMG_PCIDEV_STT_VAL_L1_ACT_DIS); +} + +/** + * iwlagn_txq_ctx_stop - Stop all Tx DMA channels + */ +static int iwl_trans_tx_stop(struct iwl_priv *priv) +{ + int ch, txq_id; + unsigned long flags; + + /* Turn off all Tx DMA fifos */ + spin_lock_irqsave(&priv->lock, flags); + + iwl_trans_txq_set_sched(priv, 0); + + /* Stop each Tx DMA channel, and wait for it to be idle */ + for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) { + 
iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); + if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG, + FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), + 1000)) + IWL_ERR(priv, "Failing on timeout while stopping" + " DMA channel %d [0x%08x]", ch, + iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG)); + } + spin_unlock_irqrestore(&priv->lock, flags); + + if (!priv->txq) { + IWL_WARN(priv, "Stopping tx queues that aren't allocated..."); + return 0; + } + + /* Unmap DMA from host system and free skb's */ + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) + iwl_tx_queue_unmap(priv, txq_id); + + return 0; +} + +static void iwl_trans_stop_device(struct iwl_priv *priv) +{ + unsigned long flags; + + /* stop and reset the on-board processor */ + iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + + /* tell the device to stop sending interrupts */ + spin_lock_irqsave(&priv->lock, flags); + iwl_disable_interrupts(priv); + spin_unlock_irqrestore(&priv->lock, flags); + trans_sync_irq(&priv->trans); + + /* device going down, Stop using ICT table */ + iwl_disable_ict(priv); + + /* + * If a HW restart happens during firmware loading, + * then the firmware loading might call this function + * and later it might be called again due to the + * restart. So don't process again if the device is + * already dead. + */ + if (test_bit(STATUS_DEVICE_ENABLED, &priv->status)) { + iwl_trans_tx_stop(priv); + iwl_trans_rx_stop(priv); + + /* Power-down device's busmaster DMA clocks */ + iwl_write_prph(priv, APMG_CLK_DIS_REG, + APMG_CLK_VAL_DMA_CLK_RQT); + udelay(5); + } + + /* Make sure (redundant) we've released our request to stay awake */ + iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + + /* Stop the device, and put it in low power state */ + iwl_apm_stop(priv); +} + +static struct iwl_tx_cmd *iwl_trans_get_tx_cmd(struct iwl_priv *priv, + int txq_id) +{ + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct iwl_queue *q = &txq->q; + struct iwl_device_cmd *dev_cmd; + + if (unlikely(iwl_queue_space(q) < q->high_mark)) + return NULL; + + /* + * Set up the Tx-command (not MAC!) header. + * Store the chosen Tx queue and TFD index within the sequence field; + * after Tx, uCode's Tx response will return this value so driver can + * locate the frame within the tx queue and do post-tx processing. 
+ */ + dev_cmd = txq->cmd[q->write_ptr]; + memset(dev_cmd, 0, sizeof(*dev_cmd)); + dev_cmd->hdr.cmd = REPLY_TX; + dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | + INDEX_TO_SEQ(q->write_ptr))); + return &dev_cmd->cmd.tx; +} + +static int iwl_trans_tx(struct iwl_priv *priv, struct sk_buff *skb, + struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu, + struct iwl_rxon_context *ctx) +{ + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct iwl_queue *q = &txq->q; + struct iwl_device_cmd *dev_cmd = txq->cmd[q->write_ptr]; + struct iwl_cmd_meta *out_meta; + + dma_addr_t phys_addr = 0; + dma_addr_t txcmd_phys; + dma_addr_t scratch_phys; + u16 len, firstlen, secondlen; + u8 wait_write_ptr = 0; + u8 hdr_len = ieee80211_hdrlen(fc); + + /* Set up driver data for this TFD */ + memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); + txq->txb[q->write_ptr].skb = skb; + txq->txb[q->write_ptr].ctx = ctx; + + /* Set up first empty entry in queue's array of Tx/cmd buffers */ + out_meta = &txq->meta[q->write_ptr]; + + /* + * Use the first empty entry in this queue's command buffer array + * to contain the Tx command and MAC header concatenated together + * (payload data will be in another buffer). + * Size of this varies, due to varying MAC header length. + * If end is not dword aligned, we'll have 2 extra bytes at the end + * of the MAC header (device reads on dword boundaries). + * We'll tell device about this padding later. + */ + len = sizeof(struct iwl_tx_cmd) + + sizeof(struct iwl_cmd_header) + hdr_len; + firstlen = (len + 3) & ~3; + + /* Tell NIC about any 2-byte padding after MAC header */ + if (firstlen != len) + tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; + + /* Physical address of this Tx command's header (not MAC header!), + * within command buffer array. */ + txcmd_phys = dma_map_single(priv->bus->dev, + &dev_cmd->hdr, firstlen, + DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(priv->bus->dev, txcmd_phys))) + return -1; + dma_unmap_addr_set(out_meta, mapping, txcmd_phys); + dma_unmap_len_set(out_meta, len, firstlen); + + if (!ieee80211_has_morefrags(fc)) { + txq->need_update = 1; + } else { + wait_write_ptr = 1; + txq->need_update = 0; + } + + /* Set up TFD's 2nd entry to point directly to remainder of skb, + * if any (802.11 null frames have no payload). 
*/ + secondlen = skb->len - hdr_len; + if (secondlen > 0) { + phys_addr = dma_map_single(priv->bus->dev, skb->data + hdr_len, + secondlen, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) { + dma_unmap_single(priv->bus->dev, + dma_unmap_addr(out_meta, mapping), + dma_unmap_len(out_meta, len), + DMA_BIDIRECTIONAL); + return -1; + } + } + + /* Attach buffers to TFD */ + iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1); + if (secondlen > 0) + iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, + secondlen, 0); + + scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + + offsetof(struct iwl_tx_cmd, scratch); + + /* take back ownership of DMA buffer to enable update */ + dma_sync_single_for_cpu(priv->bus->dev, txcmd_phys, firstlen, + DMA_BIDIRECTIONAL); + tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); + tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); + + IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n", + le16_to_cpu(dev_cmd->hdr.sequence)); + IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); + iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd)); + iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len); + + /* Set up entry for this TFD in Tx byte-count array */ + if (ampdu) + iwl_trans_txq_update_byte_cnt_tbl(priv, txq, + le16_to_cpu(tx_cmd->len)); + + dma_sync_single_for_device(priv->bus->dev, txcmd_phys, firstlen, + DMA_BIDIRECTIONAL); + + trace_iwlwifi_dev_tx(priv, + &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr], + sizeof(struct iwl_tfd), + &dev_cmd->hdr, firstlen, + skb->data + hdr_len, secondlen); + + /* Tell device the write index *just past* this latest filled TFD */ + q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); + iwl_txq_update_write_ptr(priv, txq); + + /* + * At this point the frame is "transmitted" successfully + * and we will get a TX status notification eventually, + * regardless of the value of ret. "ret" only indicates + * whether or not we should update the write pointer. 
+ */ + if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) { + if (wait_write_ptr) { + txq->need_update = 1; + iwl_txq_update_write_ptr(priv, txq); + } else { + iwl_stop_queue(priv, txq); + } + } + return 0; +} + +static void iwl_trans_kick_nic(struct iwl_priv *priv) +{ + /* Remove all resets to allow NIC to operate */ + iwl_write32(priv, CSR_RESET, 0); +} + +static void iwl_trans_sync_irq(struct iwl_priv *priv) +{ + /* wait to make sure we flush pending tasklet*/ + synchronize_irq(priv->bus->irq); + tasklet_kill(&priv->irq_tasklet); +} + +static void iwl_trans_free(struct iwl_priv *priv) +{ + free_irq(priv->bus->irq, priv); + iwl_free_isr_ict(priv); +} + +static const struct iwl_trans_ops trans_ops = { + .start_device = iwl_trans_start_device, + .prepare_card_hw = iwl_trans_prepare_card_hw, + .stop_device = iwl_trans_stop_device, + + .tx_start = iwl_trans_tx_start, + + .rx_free = iwl_trans_rx_free, + .tx_free = iwl_trans_tx_free, + + .send_cmd = iwl_send_cmd, + .send_cmd_pdu = iwl_send_cmd_pdu, + + .get_tx_cmd = iwl_trans_get_tx_cmd, + .tx = iwl_trans_tx, + + .txq_agg_disable = iwl_trans_txq_agg_disable, + .txq_agg_setup = iwl_trans_txq_agg_setup, + + .kick_nic = iwl_trans_kick_nic, + + .sync_irq = iwl_trans_sync_irq, + .free = iwl_trans_free, +}; + +int iwl_trans_register(struct iwl_trans *trans, struct iwl_priv *priv) +{ + int err; + + priv->trans.ops = &trans_ops; + priv->trans.priv = priv; + + tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) + iwl_irq_tasklet, (unsigned long)priv); + + iwl_alloc_isr_ict(priv); + + err = request_irq(priv->bus->irq, iwl_isr_ict, IRQF_SHARED, + DRV_NAME, priv); + if (err) { + IWL_ERR(priv, "Error allocating IRQ %d\n", priv->bus->irq); + iwl_free_isr_ict(priv); + return err; + } + + INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish); + + return 0; +} diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h new file mode 100644 index 00000000000..7993aa7ae66 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-trans.h @@ -0,0 +1,225 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_trans_h__ +#define __iwl_trans_h__ + + /* This file includes the declarations that are exported from the transport + * layer */ + +struct iwl_priv; +struct iwl_rxon_context; +struct iwl_host_cmd; + +/** + * struct iwl_trans_ops - transport specific operations + * @start_device: allocates and inits all the resources for the transport + * layer. + * @prepare_card_hw: claim ownership of the HW. Will be called during + * probe. + * @tx_start: starts and configures all the Tx FIFOs - usually done once the fw + * is alive. + * @stop_device: stops the whole device (embedded CPU put to reset) + * @rx_free: frees the rx memory + * @tx_free: frees the tx memory + * @send_cmd: send a host command + * @send_cmd_pdu: send a host command: flags can be CMD_* + * @get_tx_cmd: returns a pointer to a new Tx cmd for the upper layer's use + * @tx: send an skb + * @txq_agg_setup: setup a tx queue for AMPDU - will be called once the HW is + * ready and a successful ADDBA response has been received. + * @txq_agg_disable: de-configure a Tx queue to send AMPDUs + * @kick_nic: remove the RESET from the embedded CPU and let it run + * @sync_irq: the upper layer will typically disable interrupts and call this + * handler. After this handler returns, it is guaranteed that all + * the ISR / tasklet etc... have finished running and the transport + * layer shall not pass any Rx. + * @free: release all the resources for the transport layer itself such as + * irq, tasklet etc...
+ */ +struct iwl_trans_ops { + + int (*start_device)(struct iwl_priv *priv); + int (*prepare_card_hw)(struct iwl_priv *priv); + void (*stop_device)(struct iwl_priv *priv); + void (*tx_start)(struct iwl_priv *priv); + void (*tx_free)(struct iwl_priv *priv); + void (*rx_free)(struct iwl_priv *priv); + + int (*send_cmd)(struct iwl_priv *priv, struct iwl_host_cmd *cmd); + + int (*send_cmd_pdu)(struct iwl_priv *priv, u8 id, u32 flags, u16 len, + const void *data); + struct iwl_tx_cmd * (*get_tx_cmd)(struct iwl_priv *priv, int txq_id); + int (*tx)(struct iwl_priv *priv, struct sk_buff *skb, + struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu, + struct iwl_rxon_context *ctx); + + int (*txq_agg_disable)(struct iwl_priv *priv, u16 txq_id, + u16 ssn_idx, u8 tx_fifo); + void (*txq_agg_setup)(struct iwl_priv *priv, int sta_id, int tid, + int frame_limit); + + void (*kick_nic)(struct iwl_priv *priv); + + void (*sync_irq)(struct iwl_priv *priv); + void (*free)(struct iwl_priv *priv); +}; + +struct iwl_trans { + const struct iwl_trans_ops *ops; + struct iwl_priv *priv; +}; + +static inline int trans_start_device(struct iwl_trans *trans) +{ + return trans->ops->start_device(trans->priv); +} + +static inline int trans_prepare_card_hw(struct iwl_trans *trans) +{ + return trans->ops->prepare_card_hw(trans->priv); +} + +static inline void trans_stop_device(struct iwl_trans *trans) +{ + trans->ops->stop_device(trans->priv); +} + +static inline void trans_tx_start(struct iwl_trans *trans) +{ + trans->ops->tx_start(trans->priv); +} + +static inline void trans_rx_free(struct iwl_trans *trans) +{ + trans->ops->rx_free(trans->priv); +} + +static inline void trans_tx_free(struct iwl_trans *trans) +{ + trans->ops->tx_free(trans->priv); +} + +static inline int trans_send_cmd(struct iwl_trans *trans, + struct iwl_host_cmd *cmd) +{ + return trans->ops->send_cmd(trans->priv, cmd); +} + +static inline int trans_send_cmd_pdu(struct iwl_trans *trans, u8 id, u32 flags, + u16 len, const void *data) +{ + return trans->ops->send_cmd_pdu(trans->priv, id, flags, len, data); +} + +static inline struct iwl_tx_cmd *trans_get_tx_cmd(struct iwl_trans *trans, + int txq_id) +{ + return trans->ops->get_tx_cmd(trans->priv, txq_id); +} + +static inline int trans_tx(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu, + struct iwl_rxon_context *ctx) +{ + return trans->ops->tx(trans->priv, skb, tx_cmd, txq_id, fc, ampdu, ctx); +} + +static inline int trans_txq_agg_disable(struct iwl_trans *trans, u16 txq_id, + u16 ssn_idx, u8 tx_fifo) +{ + return trans->ops->txq_agg_disable(trans->priv, txq_id, + ssn_idx, tx_fifo); +} + +static inline void trans_txq_agg_setup(struct iwl_trans *trans, int sta_id, + int tid, int frame_limit) +{ + trans->ops->txq_agg_setup(trans->priv, sta_id, tid, frame_limit); +} + +static inline void trans_kick_nic(struct iwl_trans *trans) +{ + trans->ops->kick_nic(trans->priv); +} + +static inline void trans_sync_irq(struct iwl_trans *trans) +{ + trans->ops->sync_irq(trans->priv); +} + +static inline void trans_free(struct iwl_trans *trans) +{ + trans->ops->free(trans->priv); +} + +int iwl_trans_register(struct iwl_trans *trans, struct iwl_priv *priv); + +/*TODO: this functions should NOT be exported from trans module - export it + * until the reclaim flow will be brought to the transport module too */ + +struct iwl_tx_queue; +void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv, + struct iwl_tx_queue *txq); + +#endif /* __iwl_trans_h__ */ |
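For illustration only, here is a minimal sketch (not part of the patch) of how an upper layer might drive the new transport through the inline wrappers declared in iwl-trans.h. The function name example_bring_up, the command id EXAMPLE_CMD_ID and the dummy payload are hypothetical placeholders; real bring-up also loads the ucode and waits for the ALIVE notification, which is outside this patch.

/* sketch: assumes EXAMPLE_CMD_ID is defined elsewhere for illustration */
static int example_bring_up(struct iwl_priv *priv)
{
	struct iwl_trans *trans = &priv->trans;
	u8 dummy = 0;
	int ret;

	/* hook up the PCIe transport ops, the ICT table and the interrupt handler */
	ret = iwl_trans_register(trans, priv);
	if (ret)
		return ret;

	/* claim the HW and set up the RX/TX DMA, scheduler and queues */
	ret = trans_start_device(trans);
	if (ret) {
		trans_free(trans);
		return ret;
	}

	/* once the ucode has been loaded, let the embedded CPU run */
	trans_kick_nic(trans);

	/* host commands now go through the transport layer, sync or CMD_ASYNC */
	ret = trans_send_cmd_pdu(trans, EXAMPLE_CMD_ID, CMD_ASYNC,
				 sizeof(dummy), &dummy);
	if (ret)
		IWL_ERR(priv, "example command failed: %d\n", ret);

	return ret;
}

The point of the wrapper layer is that the upper layer no longer touches the Tx/Rx queues or the PCIe registers directly; everything behind trans->ops can later be swapped for a different bus implementation without changing callers.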