Diffstat (limited to 'drivers/net/wireless/iwlwifi/pcie')
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/1000.c      141
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/2000.c      243
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/5000.c      180
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/6000.c      403
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/cfg.h       113
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c       259
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/internal.h  153
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/rx.c        696
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c    1140
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c        824
10 files changed, 1984 insertions, 2168 deletions
diff --git a/drivers/net/wireless/iwlwifi/pcie/1000.c b/drivers/net/wireless/iwlwifi/pcie/1000.c
deleted file mode 100644
index f8620ecae6b..00000000000
--- a/drivers/net/wireless/iwlwifi/pcie/1000.c
+++ /dev/null
@@ -1,141 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/module.h>
-#include <linux/stringify.h>
-#include "iwl-config.h"
-#include "iwl-csr.h"
-#include "iwl-agn-hw.h"
-#include "cfg.h"
-
-/* Highest firmware API version supported */
-#define IWL1000_UCODE_API_MAX 5
-#define IWL100_UCODE_API_MAX 5
-
-/* Oldest version we won't warn about */
-#define IWL1000_UCODE_API_OK 5
-#define IWL100_UCODE_API_OK 5
-
-/* Lowest firmware API version supported */
-#define IWL1000_UCODE_API_MIN 1
-#define IWL100_UCODE_API_MIN 5
-
-/* EEPROM version */
-#define EEPROM_1000_TX_POWER_VERSION (4)
-#define EEPROM_1000_EEPROM_VERSION (0x15C)
-
-#define IWL1000_FW_PRE "iwlwifi-1000-"
-#define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE __stringify(api) ".ucode"
-
-#define IWL100_FW_PRE "iwlwifi-100-"
-#define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE __stringify(api) ".ucode"
-
-
-static const struct iwl_base_params iwl1000_base_params = {
- .num_of_queues = IWLAGN_NUM_QUEUES,
- .eeprom_size = OTP_LOW_IMAGE_SIZE,
- .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
- .max_ll_items = OTP_MAX_LL_ITEMS_1000,
- .shadow_ram_support = false,
- .led_compensation = 51,
- .support_ct_kill_exit = true,
- .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
- .chain_noise_scale = 1000,
- .wd_timeout = IWL_WATCHDOG_DISABLED,
- .max_event_log_size = 128,
-};
-
-static const struct iwl_ht_params iwl1000_ht_params = {
- .ht_greenfield_support = true,
- .use_rts_for_aggregation = true, /* use rts/cts protection */
- .ht40_bands = BIT(IEEE80211_BAND_2GHZ),
-};
-
-static const struct iwl_eeprom_params iwl1000_eeprom_params = {
- .regulatory_bands = {
- EEPROM_REG_BAND_1_CHANNELS,
- EEPROM_REG_BAND_2_CHANNELS,
- EEPROM_REG_BAND_3_CHANNELS,
- EEPROM_REG_BAND_4_CHANNELS,
- EEPROM_REG_BAND_5_CHANNELS,
- EEPROM_REG_BAND_24_HT40_CHANNELS,
- EEPROM_REGULATORY_BAND_NO_HT40,
- }
-};
-
-#define IWL_DEVICE_1000 \
- .fw_name_pre = IWL1000_FW_PRE, \
- .ucode_api_max = IWL1000_UCODE_API_MAX, \
- .ucode_api_ok = IWL1000_UCODE_API_OK, \
- .ucode_api_min = IWL1000_UCODE_API_MIN, \
- .device_family = IWL_DEVICE_FAMILY_1000, \
- .max_inst_size = IWLAGN_RTC_INST_SIZE, \
- .max_data_size = IWLAGN_RTC_DATA_SIZE, \
- .nvm_ver = EEPROM_1000_EEPROM_VERSION, \
- .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
- .base_params = &iwl1000_base_params, \
- .eeprom_params = &iwl1000_eeprom_params, \
- .led_mode = IWL_LED_BLINK
-
-const struct iwl_cfg iwl1000_bgn_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
- IWL_DEVICE_1000,
- .ht_params = &iwl1000_ht_params,
-};
-
-const struct iwl_cfg iwl1000_bg_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 1000 BG",
- IWL_DEVICE_1000,
-};
-
-#define IWL_DEVICE_100 \
- .fw_name_pre = IWL100_FW_PRE, \
- .ucode_api_max = IWL100_UCODE_API_MAX, \
- .ucode_api_ok = IWL100_UCODE_API_OK, \
- .ucode_api_min = IWL100_UCODE_API_MIN, \
- .device_family = IWL_DEVICE_FAMILY_100, \
- .max_inst_size = IWLAGN_RTC_INST_SIZE, \
- .max_data_size = IWLAGN_RTC_DATA_SIZE, \
- .nvm_ver = EEPROM_1000_EEPROM_VERSION, \
- .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
- .base_params = &iwl1000_base_params, \
- .eeprom_params = &iwl1000_eeprom_params, \
- .led_mode = IWL_LED_RF_STATE, \
- .rx_with_siso_diversity = true
-
-const struct iwl_cfg iwl100_bgn_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
- IWL_DEVICE_100,
- .ht_params = &iwl1000_ht_params,
-};
-
-const struct iwl_cfg iwl100_bg_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 100 BG",
- IWL_DEVICE_100,
-};
-
-MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_OK));
-MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_OK));
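[Editor's note: the FW_PRE + __stringify(api) pattern in the file above builds the firmware
file name the driver requests at load time. A minimal standalone sketch of the expansion
(plain userspace C, not driver code):

#include <stdio.h>

/* Two-level expansion, the same trick <linux/stringify.h> uses */
#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

#define IWL1000_FW_PRE "iwlwifi-1000-"
#define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE __stringify(api) ".ucode"

int main(void)
{
	/* adjacent string literals are concatenated at compile time */
	printf("%s\n", IWL1000_MODULE_FIRMWARE(5)); /* "iwlwifi-1000-5.ucode" */
	return 0;
}
]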
diff --git a/drivers/net/wireless/iwlwifi/pcie/2000.c b/drivers/net/wireless/iwlwifi/pcie/2000.c
deleted file mode 100644
index 244019cec3e..00000000000
--- a/drivers/net/wireless/iwlwifi/pcie/2000.c
+++ /dev/null
@@ -1,243 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/module.h>
-#include <linux/stringify.h>
-#include "iwl-config.h"
-#include "iwl-agn-hw.h"
-#include "cfg.h"
-#include "dvm/commands.h" /* needed for BT for now */
-
-/* Highest firmware API version supported */
-#define IWL2030_UCODE_API_MAX 6
-#define IWL2000_UCODE_API_MAX 6
-#define IWL105_UCODE_API_MAX 6
-#define IWL135_UCODE_API_MAX 6
-
-/* Oldest version we won't warn about */
-#define IWL2030_UCODE_API_OK 6
-#define IWL2000_UCODE_API_OK 6
-#define IWL105_UCODE_API_OK 6
-#define IWL135_UCODE_API_OK 6
-
-/* Lowest firmware API version supported */
-#define IWL2030_UCODE_API_MIN 5
-#define IWL2000_UCODE_API_MIN 5
-#define IWL105_UCODE_API_MIN 5
-#define IWL135_UCODE_API_MIN 5
-
-/* EEPROM version */
-#define EEPROM_2000_TX_POWER_VERSION (6)
-#define EEPROM_2000_EEPROM_VERSION (0x805)
-
-
-#define IWL2030_FW_PRE "iwlwifi-2030-"
-#define IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE __stringify(api) ".ucode"
-
-#define IWL2000_FW_PRE "iwlwifi-2000-"
-#define IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE __stringify(api) ".ucode"
-
-#define IWL105_FW_PRE "iwlwifi-105-"
-#define IWL105_MODULE_FIRMWARE(api) IWL105_FW_PRE __stringify(api) ".ucode"
-
-#define IWL135_FW_PRE "iwlwifi-135-"
-#define IWL135_MODULE_FIRMWARE(api) IWL135_FW_PRE __stringify(api) ".ucode"
-
-static const struct iwl_base_params iwl2000_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE,
- .num_of_queues = IWLAGN_NUM_QUEUES,
- .pll_cfg_val = 0,
- .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
- .shadow_ram_support = true,
- .led_compensation = 51,
- .adv_thermal_throttle = true,
- .support_ct_kill_exit = true,
- .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
- .chain_noise_scale = 1000,
- .wd_timeout = IWL_DEF_WD_TIMEOUT,
- .max_event_log_size = 512,
- .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
- .hd_v2 = true,
-};
-
-
-static const struct iwl_base_params iwl2030_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE,
- .num_of_queues = IWLAGN_NUM_QUEUES,
- .pll_cfg_val = 0,
- .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
- .shadow_ram_support = true,
- .led_compensation = 57,
- .adv_thermal_throttle = true,
- .support_ct_kill_exit = true,
- .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
- .chain_noise_scale = 1000,
- .wd_timeout = IWL_LONG_WD_TIMEOUT,
- .max_event_log_size = 512,
- .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
- .hd_v2 = true,
-};
-
-static const struct iwl_ht_params iwl2000_ht_params = {
- .ht_greenfield_support = true,
- .use_rts_for_aggregation = true, /* use rts/cts protection */
- .ht40_bands = BIT(IEEE80211_BAND_2GHZ),
-};
-
-static const struct iwl_bt_params iwl2030_bt_params = {
- /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
- .advanced_bt_coexist = true,
- .agg_time_limit = BT_AGG_THRESHOLD_DEF,
- .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
- .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT32,
- .bt_sco_disable = true,
- .bt_session_2 = true,
-};
-
-static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
- .regulatory_bands = {
- EEPROM_REG_BAND_1_CHANNELS,
- EEPROM_REG_BAND_2_CHANNELS,
- EEPROM_REG_BAND_3_CHANNELS,
- EEPROM_REG_BAND_4_CHANNELS,
- EEPROM_REG_BAND_5_CHANNELS,
- EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
- EEPROM_REGULATORY_BAND_NO_HT40,
- },
- .enhanced_txpower = true,
-};
-
-#define IWL_DEVICE_2000 \
- .fw_name_pre = IWL2000_FW_PRE, \
- .ucode_api_max = IWL2000_UCODE_API_MAX, \
- .ucode_api_ok = IWL2000_UCODE_API_OK, \
- .ucode_api_min = IWL2000_UCODE_API_MIN, \
- .device_family = IWL_DEVICE_FAMILY_2000, \
- .max_inst_size = IWL60_RTC_INST_SIZE, \
- .max_data_size = IWL60_RTC_DATA_SIZE, \
- .nvm_ver = EEPROM_2000_EEPROM_VERSION, \
- .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
- .base_params = &iwl2000_base_params, \
- .eeprom_params = &iwl20x0_eeprom_params, \
- .need_temp_offset_calib = true, \
- .temp_offset_v2 = true, \
- .led_mode = IWL_LED_RF_STATE
-
-const struct iwl_cfg iwl2000_2bgn_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN",
- IWL_DEVICE_2000,
- .ht_params = &iwl2000_ht_params,
-};
-
-const struct iwl_cfg iwl2000_2bgn_d_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 2200D BGN",
- IWL_DEVICE_2000,
- .ht_params = &iwl2000_ht_params,
-};
-
-#define IWL_DEVICE_2030 \
- .fw_name_pre = IWL2030_FW_PRE, \
- .ucode_api_max = IWL2030_UCODE_API_MAX, \
- .ucode_api_ok = IWL2030_UCODE_API_OK, \
- .ucode_api_min = IWL2030_UCODE_API_MIN, \
- .device_family = IWL_DEVICE_FAMILY_2030, \
- .max_inst_size = IWL60_RTC_INST_SIZE, \
- .max_data_size = IWL60_RTC_DATA_SIZE, \
- .nvm_ver = EEPROM_2000_EEPROM_VERSION, \
- .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
- .base_params = &iwl2030_base_params, \
- .bt_params = &iwl2030_bt_params, \
- .eeprom_params = &iwl20x0_eeprom_params, \
- .need_temp_offset_calib = true, \
- .temp_offset_v2 = true, \
- .led_mode = IWL_LED_RF_STATE, \
- .adv_pm = true
-
-const struct iwl_cfg iwl2030_2bgn_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
- IWL_DEVICE_2030,
- .ht_params = &iwl2000_ht_params,
-};
-
-#define IWL_DEVICE_105 \
- .fw_name_pre = IWL105_FW_PRE, \
- .ucode_api_max = IWL105_UCODE_API_MAX, \
- .ucode_api_ok = IWL105_UCODE_API_OK, \
- .ucode_api_min = IWL105_UCODE_API_MIN, \
- .device_family = IWL_DEVICE_FAMILY_105, \
- .max_inst_size = IWL60_RTC_INST_SIZE, \
- .max_data_size = IWL60_RTC_DATA_SIZE, \
- .nvm_ver = EEPROM_2000_EEPROM_VERSION, \
- .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
- .base_params = &iwl2000_base_params, \
- .eeprom_params = &iwl20x0_eeprom_params, \
- .need_temp_offset_calib = true, \
- .temp_offset_v2 = true, \
- .led_mode = IWL_LED_RF_STATE, \
- .adv_pm = true, \
- .rx_with_siso_diversity = true
-
-const struct iwl_cfg iwl105_bgn_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
- IWL_DEVICE_105,
- .ht_params = &iwl2000_ht_params,
-};
-
-const struct iwl_cfg iwl105_bgn_d_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 105D BGN",
- IWL_DEVICE_105,
- .ht_params = &iwl2000_ht_params,
-};
-
-#define IWL_DEVICE_135 \
- .fw_name_pre = IWL135_FW_PRE, \
- .ucode_api_max = IWL135_UCODE_API_MAX, \
- .ucode_api_ok = IWL135_UCODE_API_OK, \
- .ucode_api_min = IWL135_UCODE_API_MIN, \
- .device_family = IWL_DEVICE_FAMILY_135, \
- .max_inst_size = IWL60_RTC_INST_SIZE, \
- .max_data_size = IWL60_RTC_DATA_SIZE, \
- .nvm_ver = EEPROM_2000_EEPROM_VERSION, \
- .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
- .base_params = &iwl2030_base_params, \
- .bt_params = &iwl2030_bt_params, \
- .eeprom_params = &iwl20x0_eeprom_params, \
- .need_temp_offset_calib = true, \
- .temp_offset_v2 = true, \
- .led_mode = IWL_LED_RF_STATE, \
- .adv_pm = true, \
- .rx_with_siso_diversity = true
-
-const struct iwl_cfg iwl135_bgn_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
- IWL_DEVICE_135,
- .ht_params = &iwl2000_ht_params,
-};
-
-MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_OK));
-MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_OK));
-MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_OK));
-MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/pcie/5000.c b/drivers/net/wireless/iwlwifi/pcie/5000.c
deleted file mode 100644
index 83ca40321ff..00000000000
--- a/drivers/net/wireless/iwlwifi/pcie/5000.c
+++ /dev/null
@@ -1,180 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/module.h>
-#include <linux/stringify.h>
-#include "iwl-config.h"
-#include "iwl-agn-hw.h"
-#include "iwl-csr.h"
-#include "cfg.h"
-
-/* Highest firmware API version supported */
-#define IWL5000_UCODE_API_MAX 5
-#define IWL5150_UCODE_API_MAX 2
-
-/* Oldest version we won't warn about */
-#define IWL5000_UCODE_API_OK 5
-#define IWL5150_UCODE_API_OK 2
-
-/* Lowest firmware API version supported */
-#define IWL5000_UCODE_API_MIN 1
-#define IWL5150_UCODE_API_MIN 1
-
-/* EEPROM versions */
-#define EEPROM_5000_TX_POWER_VERSION (4)
-#define EEPROM_5000_EEPROM_VERSION (0x11A)
-#define EEPROM_5050_TX_POWER_VERSION (4)
-#define EEPROM_5050_EEPROM_VERSION (0x21E)
-
-#define IWL5000_FW_PRE "iwlwifi-5000-"
-#define IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE __stringify(api) ".ucode"
-
-#define IWL5150_FW_PRE "iwlwifi-5150-"
-#define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE __stringify(api) ".ucode"
-
-static const struct iwl_base_params iwl5000_base_params = {
- .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
- .num_of_queues = IWLAGN_NUM_QUEUES,
- .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
- .led_compensation = 51,
- .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
- .chain_noise_scale = 1000,
- .wd_timeout = IWL_WATCHDOG_DISABLED,
- .max_event_log_size = 512,
- .no_idle_support = true,
-};
-
-static const struct iwl_ht_params iwl5000_ht_params = {
- .ht_greenfield_support = true,
- .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
-};
-
-static const struct iwl_eeprom_params iwl5000_eeprom_params = {
- .regulatory_bands = {
- EEPROM_REG_BAND_1_CHANNELS,
- EEPROM_REG_BAND_2_CHANNELS,
- EEPROM_REG_BAND_3_CHANNELS,
- EEPROM_REG_BAND_4_CHANNELS,
- EEPROM_REG_BAND_5_CHANNELS,
- EEPROM_REG_BAND_24_HT40_CHANNELS,
- EEPROM_REG_BAND_52_HT40_CHANNELS
- },
-};
-
-#define IWL_DEVICE_5000 \
- .fw_name_pre = IWL5000_FW_PRE, \
- .ucode_api_max = IWL5000_UCODE_API_MAX, \
- .ucode_api_ok = IWL5000_UCODE_API_OK, \
- .ucode_api_min = IWL5000_UCODE_API_MIN, \
- .device_family = IWL_DEVICE_FAMILY_5000, \
- .max_inst_size = IWLAGN_RTC_INST_SIZE, \
- .max_data_size = IWLAGN_RTC_DATA_SIZE, \
- .nvm_ver = EEPROM_5000_EEPROM_VERSION, \
- .nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
- .base_params = &iwl5000_base_params, \
- .eeprom_params = &iwl5000_eeprom_params, \
- .led_mode = IWL_LED_BLINK
-
-const struct iwl_cfg iwl5300_agn_cfg = {
- .name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
- IWL_DEVICE_5000,
- /* at least EEPROM 0x11A has wrong info */
- .valid_tx_ant = ANT_ABC, /* .cfg overwrite */
- .valid_rx_ant = ANT_ABC, /* .cfg overwrite */
- .ht_params = &iwl5000_ht_params,
-};
-
-const struct iwl_cfg iwl5100_bgn_cfg = {
- .name = "Intel(R) WiFi Link 5100 BGN",
- IWL_DEVICE_5000,
- .valid_tx_ant = ANT_B, /* .cfg overwrite */
- .valid_rx_ant = ANT_AB, /* .cfg overwrite */
- .ht_params = &iwl5000_ht_params,
-};
-
-const struct iwl_cfg iwl5100_abg_cfg = {
- .name = "Intel(R) WiFi Link 5100 ABG",
- IWL_DEVICE_5000,
- .valid_tx_ant = ANT_B, /* .cfg overwrite */
- .valid_rx_ant = ANT_AB, /* .cfg overwrite */
-};
-
-const struct iwl_cfg iwl5100_agn_cfg = {
- .name = "Intel(R) WiFi Link 5100 AGN",
- IWL_DEVICE_5000,
- .valid_tx_ant = ANT_B, /* .cfg overwrite */
- .valid_rx_ant = ANT_AB, /* .cfg overwrite */
- .ht_params = &iwl5000_ht_params,
-};
-
-const struct iwl_cfg iwl5350_agn_cfg = {
- .name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
- .fw_name_pre = IWL5000_FW_PRE,
- .ucode_api_max = IWL5000_UCODE_API_MAX,
- .ucode_api_ok = IWL5000_UCODE_API_OK,
- .ucode_api_min = IWL5000_UCODE_API_MIN,
- .device_family = IWL_DEVICE_FAMILY_5000,
- .max_inst_size = IWLAGN_RTC_INST_SIZE,
- .max_data_size = IWLAGN_RTC_DATA_SIZE,
- .nvm_ver = EEPROM_5050_EEPROM_VERSION,
- .nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION,
- .base_params = &iwl5000_base_params,
- .eeprom_params = &iwl5000_eeprom_params,
- .ht_params = &iwl5000_ht_params,
- .led_mode = IWL_LED_BLINK,
- .internal_wimax_coex = true,
-};
-
-#define IWL_DEVICE_5150 \
- .fw_name_pre = IWL5150_FW_PRE, \
- .ucode_api_max = IWL5150_UCODE_API_MAX, \
- .ucode_api_ok = IWL5150_UCODE_API_OK, \
- .ucode_api_min = IWL5150_UCODE_API_MIN, \
- .device_family = IWL_DEVICE_FAMILY_5150, \
- .max_inst_size = IWLAGN_RTC_INST_SIZE, \
- .max_data_size = IWLAGN_RTC_DATA_SIZE, \
- .nvm_ver = EEPROM_5050_EEPROM_VERSION, \
- .nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
- .base_params = &iwl5000_base_params, \
- .eeprom_params = &iwl5000_eeprom_params, \
- .no_xtal_calib = true, \
- .led_mode = IWL_LED_BLINK, \
- .internal_wimax_coex = true
-
-const struct iwl_cfg iwl5150_agn_cfg = {
- .name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
- IWL_DEVICE_5150,
- .ht_params = &iwl5000_ht_params,
-
-};
-
-const struct iwl_cfg iwl5150_abg_cfg = {
- .name = "Intel(R) WiMAX/WiFi Link 5150 ABG",
- IWL_DEVICE_5150,
-};
-
-MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_OK));
-MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_OK));
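[Editor's note: the valid_tx_ant/valid_rx_ant overrides in the file above are per-chain
bitmasks. A small sketch of the encoding, assuming the usual one-bit-per-antenna
convention from iwl-config.h (ANT_A = BIT(0) and so on):

#define BIT(n)	(1U << (n))
#define ANT_A	BIT(0)
#define ANT_B	BIT(1)
#define ANT_C	BIT(2)
#define ANT_AB	(ANT_A | ANT_B)
#define ANT_ABC	(ANT_A | ANT_B | ANT_C)

/* e.g. the 5100 transmits on chain B only (0x2) but receives on
 * chains A and B (0x3), while the 5300 enables all three (0x7). */
]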
diff --git a/drivers/net/wireless/iwlwifi/pcie/6000.c b/drivers/net/wireless/iwlwifi/pcie/6000.c
deleted file mode 100644
index d4df976d470..00000000000
--- a/drivers/net/wireless/iwlwifi/pcie/6000.c
+++ /dev/null
@@ -1,403 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/module.h>
-#include <linux/stringify.h>
-#include "iwl-config.h"
-#include "iwl-agn-hw.h"
-#include "cfg.h"
-#include "dvm/commands.h" /* needed for BT for now */
-
-/* Highest firmware API version supported */
-#define IWL6000_UCODE_API_MAX 6
-#define IWL6050_UCODE_API_MAX 5
-#define IWL6000G2_UCODE_API_MAX 6
-#define IWL6035_UCODE_API_MAX 6
-
-/* Oldest version we won't warn about */
-#define IWL6000_UCODE_API_OK 4
-#define IWL6000G2_UCODE_API_OK 5
-#define IWL6050_UCODE_API_OK 5
-#define IWL6000G2B_UCODE_API_OK 6
-#define IWL6035_UCODE_API_OK 6
-
-/* Lowest firmware API version supported */
-#define IWL6000_UCODE_API_MIN 4
-#define IWL6050_UCODE_API_MIN 4
-#define IWL6000G2_UCODE_API_MIN 5
-#define IWL6035_UCODE_API_MIN 6
-
-/* EEPROM versions */
-#define EEPROM_6000_TX_POWER_VERSION (4)
-#define EEPROM_6000_EEPROM_VERSION (0x423)
-#define EEPROM_6050_TX_POWER_VERSION (4)
-#define EEPROM_6050_EEPROM_VERSION (0x532)
-#define EEPROM_6150_TX_POWER_VERSION (6)
-#define EEPROM_6150_EEPROM_VERSION (0x553)
-#define EEPROM_6005_TX_POWER_VERSION (6)
-#define EEPROM_6005_EEPROM_VERSION (0x709)
-#define EEPROM_6030_TX_POWER_VERSION (6)
-#define EEPROM_6030_EEPROM_VERSION (0x709)
-#define EEPROM_6035_TX_POWER_VERSION (6)
-#define EEPROM_6035_EEPROM_VERSION (0x753)
-
-#define IWL6000_FW_PRE "iwlwifi-6000-"
-#define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE __stringify(api) ".ucode"
-
-#define IWL6050_FW_PRE "iwlwifi-6050-"
-#define IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE __stringify(api) ".ucode"
-
-#define IWL6005_FW_PRE "iwlwifi-6000g2a-"
-#define IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE __stringify(api) ".ucode"
-
-#define IWL6030_FW_PRE "iwlwifi-6000g2b-"
-#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE __stringify(api) ".ucode"
-
-static const struct iwl_base_params iwl6000_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE,
- .num_of_queues = IWLAGN_NUM_QUEUES,
- .pll_cfg_val = 0,
- .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
- .shadow_ram_support = true,
- .led_compensation = 51,
- .adv_thermal_throttle = true,
- .support_ct_kill_exit = true,
- .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
- .chain_noise_scale = 1000,
- .wd_timeout = IWL_DEF_WD_TIMEOUT,
- .max_event_log_size = 512,
- .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
-};
-
-static const struct iwl_base_params iwl6050_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE,
- .num_of_queues = IWLAGN_NUM_QUEUES,
- .pll_cfg_val = 0,
- .max_ll_items = OTP_MAX_LL_ITEMS_6x50,
- .shadow_ram_support = true,
- .led_compensation = 51,
- .adv_thermal_throttle = true,
- .support_ct_kill_exit = true,
- .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
- .chain_noise_scale = 1500,
- .wd_timeout = IWL_DEF_WD_TIMEOUT,
- .max_event_log_size = 1024,
- .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
-};
-
-static const struct iwl_base_params iwl6000_g2_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE,
- .num_of_queues = IWLAGN_NUM_QUEUES,
- .pll_cfg_val = 0,
- .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
- .shadow_ram_support = true,
- .led_compensation = 57,
- .adv_thermal_throttle = true,
- .support_ct_kill_exit = true,
- .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
- .chain_noise_scale = 1000,
- .wd_timeout = IWL_LONG_WD_TIMEOUT,
- .max_event_log_size = 512,
- .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
-};
-
-static const struct iwl_ht_params iwl6000_ht_params = {
- .ht_greenfield_support = true,
- .use_rts_for_aggregation = true, /* use rts/cts protection */
- .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
-};
-
-static const struct iwl_bt_params iwl6000_bt_params = {
- /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
- .advanced_bt_coexist = true,
- .agg_time_limit = BT_AGG_THRESHOLD_DEF,
- .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
- .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
- .bt_sco_disable = true,
-};
-
-static const struct iwl_eeprom_params iwl6000_eeprom_params = {
- .regulatory_bands = {
- EEPROM_REG_BAND_1_CHANNELS,
- EEPROM_REG_BAND_2_CHANNELS,
- EEPROM_REG_BAND_3_CHANNELS,
- EEPROM_REG_BAND_4_CHANNELS,
- EEPROM_REG_BAND_5_CHANNELS,
- EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
- EEPROM_REG_BAND_52_HT40_CHANNELS
- },
- .enhanced_txpower = true,
-};
-
-#define IWL_DEVICE_6005 \
- .fw_name_pre = IWL6005_FW_PRE, \
- .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
- .ucode_api_ok = IWL6000G2_UCODE_API_OK, \
- .ucode_api_min = IWL6000G2_UCODE_API_MIN, \
- .device_family = IWL_DEVICE_FAMILY_6005, \
- .max_inst_size = IWL60_RTC_INST_SIZE, \
- .max_data_size = IWL60_RTC_DATA_SIZE, \
- .nvm_ver = EEPROM_6005_EEPROM_VERSION, \
- .nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
- .base_params = &iwl6000_g2_base_params, \
- .eeprom_params = &iwl6000_eeprom_params, \
- .need_temp_offset_calib = true, \
- .led_mode = IWL_LED_RF_STATE
-
-const struct iwl_cfg iwl6005_2agn_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
- IWL_DEVICE_6005,
- .ht_params = &iwl6000_ht_params,
-};
-
-const struct iwl_cfg iwl6005_2abg_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6205 ABG",
- IWL_DEVICE_6005,
-};
-
-const struct iwl_cfg iwl6005_2bg_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6205 BG",
- IWL_DEVICE_6005,
-};
-
-const struct iwl_cfg iwl6005_2agn_sff_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6205S AGN",
- IWL_DEVICE_6005,
- .ht_params = &iwl6000_ht_params,
-};
-
-const struct iwl_cfg iwl6005_2agn_d_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6205D AGN",
- IWL_DEVICE_6005,
- .ht_params = &iwl6000_ht_params,
-};
-
-const struct iwl_cfg iwl6005_2agn_mow1_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6206 AGN",
- IWL_DEVICE_6005,
- .ht_params = &iwl6000_ht_params,
-};
-
-const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6207 AGN",
- IWL_DEVICE_6005,
- .ht_params = &iwl6000_ht_params,
-};
-
-#define IWL_DEVICE_6030 \
- .fw_name_pre = IWL6030_FW_PRE, \
- .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
- .ucode_api_ok = IWL6000G2B_UCODE_API_OK, \
- .ucode_api_min = IWL6000G2_UCODE_API_MIN, \
- .device_family = IWL_DEVICE_FAMILY_6030, \
- .max_inst_size = IWL60_RTC_INST_SIZE, \
- .max_data_size = IWL60_RTC_DATA_SIZE, \
- .nvm_ver = EEPROM_6030_EEPROM_VERSION, \
- .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
- .base_params = &iwl6000_g2_base_params, \
- .bt_params = &iwl6000_bt_params, \
- .eeprom_params = &iwl6000_eeprom_params, \
- .need_temp_offset_calib = true, \
- .led_mode = IWL_LED_RF_STATE, \
- .adv_pm = true \
-
-const struct iwl_cfg iwl6030_2agn_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
- IWL_DEVICE_6030,
- .ht_params = &iwl6000_ht_params,
-};
-
-const struct iwl_cfg iwl6030_2abg_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6230 ABG",
- IWL_DEVICE_6030,
-};
-
-const struct iwl_cfg iwl6030_2bgn_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6230 BGN",
- IWL_DEVICE_6030,
- .ht_params = &iwl6000_ht_params,
-};
-
-const struct iwl_cfg iwl6030_2bg_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6230 BG",
- IWL_DEVICE_6030,
-};
-
-#define IWL_DEVICE_6035 \
- .fw_name_pre = IWL6030_FW_PRE, \
- .ucode_api_max = IWL6035_UCODE_API_MAX, \
- .ucode_api_ok = IWL6035_UCODE_API_OK, \
- .ucode_api_min = IWL6035_UCODE_API_MIN, \
- .device_family = IWL_DEVICE_FAMILY_6030, \
- .max_inst_size = IWL60_RTC_INST_SIZE, \
- .max_data_size = IWL60_RTC_DATA_SIZE, \
- .nvm_ver = EEPROM_6030_EEPROM_VERSION, \
- .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
- .base_params = &iwl6000_g2_base_params, \
- .bt_params = &iwl6000_bt_params, \
- .eeprom_params = &iwl6000_eeprom_params, \
- .need_temp_offset_calib = true, \
- .led_mode = IWL_LED_RF_STATE, \
- .adv_pm = true
-
-const struct iwl_cfg iwl6035_2agn_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
- IWL_DEVICE_6035,
- .ht_params = &iwl6000_ht_params,
-};
-
-const struct iwl_cfg iwl1030_bgn_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
- IWL_DEVICE_6030,
- .ht_params = &iwl6000_ht_params,
-};
-
-const struct iwl_cfg iwl1030_bg_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 1030 BG",
- IWL_DEVICE_6030,
-};
-
-const struct iwl_cfg iwl130_bgn_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 130 BGN",
- IWL_DEVICE_6030,
- .ht_params = &iwl6000_ht_params,
- .rx_with_siso_diversity = true,
-};
-
-const struct iwl_cfg iwl130_bg_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 130 BG",
- IWL_DEVICE_6030,
- .rx_with_siso_diversity = true,
-};
-
-/*
- * "i": Internal configuration, use internal Power Amplifier
- */
-#define IWL_DEVICE_6000i \
- .fw_name_pre = IWL6000_FW_PRE, \
- .ucode_api_max = IWL6000_UCODE_API_MAX, \
- .ucode_api_ok = IWL6000_UCODE_API_OK, \
- .ucode_api_min = IWL6000_UCODE_API_MIN, \
- .device_family = IWL_DEVICE_FAMILY_6000i, \
- .max_inst_size = IWL60_RTC_INST_SIZE, \
- .max_data_size = IWL60_RTC_DATA_SIZE, \
- .valid_tx_ant = ANT_BC, /* .cfg overwrite */ \
- .valid_rx_ant = ANT_BC, /* .cfg overwrite */ \
- .nvm_ver = EEPROM_6000_EEPROM_VERSION, \
- .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
- .base_params = &iwl6000_base_params, \
- .eeprom_params = &iwl6000_eeprom_params, \
- .led_mode = IWL_LED_BLINK
-
-const struct iwl_cfg iwl6000i_2agn_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
- IWL_DEVICE_6000i,
- .ht_params = &iwl6000_ht_params,
-};
-
-const struct iwl_cfg iwl6000i_2abg_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6200 ABG",
- IWL_DEVICE_6000i,
-};
-
-const struct iwl_cfg iwl6000i_2bg_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6200 BG",
- IWL_DEVICE_6000i,
-};
-
-#define IWL_DEVICE_6050 \
- .fw_name_pre = IWL6050_FW_PRE, \
- .ucode_api_max = IWL6050_UCODE_API_MAX, \
- .ucode_api_min = IWL6050_UCODE_API_MIN, \
- .device_family = IWL_DEVICE_FAMILY_6050, \
- .max_inst_size = IWL60_RTC_INST_SIZE, \
- .max_data_size = IWL60_RTC_DATA_SIZE, \
- .valid_tx_ant = ANT_AB, /* .cfg overwrite */ \
- .valid_rx_ant = ANT_AB, /* .cfg overwrite */ \
- .nvm_ver = EEPROM_6050_EEPROM_VERSION, \
- .nvm_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
- .base_params = &iwl6050_base_params, \
- .eeprom_params = &iwl6000_eeprom_params, \
- .led_mode = IWL_LED_BLINK, \
- .internal_wimax_coex = true
-
-const struct iwl_cfg iwl6050_2agn_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
- IWL_DEVICE_6050,
- .ht_params = &iwl6000_ht_params,
-};
-
-const struct iwl_cfg iwl6050_2abg_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 ABG",
- IWL_DEVICE_6050,
-};
-
-#define IWL_DEVICE_6150 \
- .fw_name_pre = IWL6050_FW_PRE, \
- .ucode_api_max = IWL6050_UCODE_API_MAX, \
- .ucode_api_min = IWL6050_UCODE_API_MIN, \
- .device_family = IWL_DEVICE_FAMILY_6150, \
- .max_inst_size = IWL60_RTC_INST_SIZE, \
- .max_data_size = IWL60_RTC_DATA_SIZE, \
- .nvm_ver = EEPROM_6150_EEPROM_VERSION, \
- .nvm_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
- .base_params = &iwl6050_base_params, \
- .eeprom_params = &iwl6000_eeprom_params, \
- .led_mode = IWL_LED_BLINK, \
- .internal_wimax_coex = true
-
-const struct iwl_cfg iwl6150_bgn_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
- IWL_DEVICE_6150,
- .ht_params = &iwl6000_ht_params,
-};
-
-const struct iwl_cfg iwl6150_bg_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BG",
- IWL_DEVICE_6150,
-};
-
-const struct iwl_cfg iwl6000_3agn_cfg = {
- .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
- .fw_name_pre = IWL6000_FW_PRE,
- .ucode_api_max = IWL6000_UCODE_API_MAX,
- .ucode_api_ok = IWL6000_UCODE_API_OK,
- .ucode_api_min = IWL6000_UCODE_API_MIN,
- .device_family = IWL_DEVICE_FAMILY_6000,
- .max_inst_size = IWL60_RTC_INST_SIZE,
- .max_data_size = IWL60_RTC_DATA_SIZE,
- .nvm_ver = EEPROM_6000_EEPROM_VERSION,
- .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION,
- .base_params = &iwl6000_base_params,
- .eeprom_params = &iwl6000_eeprom_params,
- .ht_params = &iwl6000_ht_params,
- .led_mode = IWL_LED_BLINK,
-};
-
-MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK));
-MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_OK));
-MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_OK));
-MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/pcie/cfg.h b/drivers/net/wireless/iwlwifi/pcie/cfg.h
deleted file mode 100644
index 82152311d73..00000000000
--- a/drivers/net/wireless/iwlwifi/pcie/cfg.h
+++ /dev/null
@@ -1,113 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#ifndef __iwl_pci_h__
-#define __iwl_pci_h__
-
-
-/*
- * This file declares the config structures for all devices.
- */
-
-extern const struct iwl_cfg iwl5300_agn_cfg;
-extern const struct iwl_cfg iwl5100_agn_cfg;
-extern const struct iwl_cfg iwl5350_agn_cfg;
-extern const struct iwl_cfg iwl5100_bgn_cfg;
-extern const struct iwl_cfg iwl5100_abg_cfg;
-extern const struct iwl_cfg iwl5150_agn_cfg;
-extern const struct iwl_cfg iwl5150_abg_cfg;
-extern const struct iwl_cfg iwl6005_2agn_cfg;
-extern const struct iwl_cfg iwl6005_2abg_cfg;
-extern const struct iwl_cfg iwl6005_2bg_cfg;
-extern const struct iwl_cfg iwl6005_2agn_sff_cfg;
-extern const struct iwl_cfg iwl6005_2agn_d_cfg;
-extern const struct iwl_cfg iwl6005_2agn_mow1_cfg;
-extern const struct iwl_cfg iwl6005_2agn_mow2_cfg;
-extern const struct iwl_cfg iwl1030_bgn_cfg;
-extern const struct iwl_cfg iwl1030_bg_cfg;
-extern const struct iwl_cfg iwl6030_2agn_cfg;
-extern const struct iwl_cfg iwl6030_2abg_cfg;
-extern const struct iwl_cfg iwl6030_2bgn_cfg;
-extern const struct iwl_cfg iwl6030_2bg_cfg;
-extern const struct iwl_cfg iwl6000i_2agn_cfg;
-extern const struct iwl_cfg iwl6000i_2abg_cfg;
-extern const struct iwl_cfg iwl6000i_2bg_cfg;
-extern const struct iwl_cfg iwl6000_3agn_cfg;
-extern const struct iwl_cfg iwl6050_2agn_cfg;
-extern const struct iwl_cfg iwl6050_2abg_cfg;
-extern const struct iwl_cfg iwl6150_bgn_cfg;
-extern const struct iwl_cfg iwl6150_bg_cfg;
-extern const struct iwl_cfg iwl1000_bgn_cfg;
-extern const struct iwl_cfg iwl1000_bg_cfg;
-extern const struct iwl_cfg iwl100_bgn_cfg;
-extern const struct iwl_cfg iwl100_bg_cfg;
-extern const struct iwl_cfg iwl130_bgn_cfg;
-extern const struct iwl_cfg iwl130_bg_cfg;
-extern const struct iwl_cfg iwl2000_2bgn_cfg;
-extern const struct iwl_cfg iwl2000_2bgn_d_cfg;
-extern const struct iwl_cfg iwl2030_2bgn_cfg;
-extern const struct iwl_cfg iwl6035_2agn_cfg;
-extern const struct iwl_cfg iwl105_bgn_cfg;
-extern const struct iwl_cfg iwl105_bgn_d_cfg;
-extern const struct iwl_cfg iwl135_bgn_cfg;
-
-#endif /* __iwl_pci_h__ */
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index c2e141af353..98950e45c7b 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -66,11 +66,10 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
+#include <linux/acpi.h>
#include "iwl-trans.h"
#include "iwl-drv.h"
-
-#include "cfg.h"
#include "internal.h"
#define IWL_PCI_DEVICE(dev, subdev, cfg) \
@@ -80,6 +79,7 @@
/* Hardware specific file defines the PCI IDs table for that hardware module */
static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
+#if IS_ENABLED(CONFIG_IWLDVM)
{IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */
{IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */
{IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */
@@ -131,6 +131,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */
{IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */
{IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_abg_cfg)}, /* Half Mini Card */
{IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */
{IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */
@@ -139,13 +140,16 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
/* 6x00 Series */
{IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_3agn_cfg)},
{IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_3agn_cfg)},
{IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
{IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
{IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
{IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
{IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
{IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_3agn_cfg)},
{IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
{IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
@@ -153,12 +157,16 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_2agn_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_2agn_cfg)},
{IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_2agn_cfg)},
{IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
{IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
{IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_2agn_sff_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */
{IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */
@@ -240,9 +248,13 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
/* 6x35 Series */
{IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x406A, iwl6035_2agn_sff_cfg)},
{IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088F, 0x426A, iwl6035_2agn_sff_cfg)},
{IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x446A, iwl6035_2agn_sff_cfg)},
{IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)},
/* 105 Series */
{IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)},
@@ -254,11 +266,219 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)},
{IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)},
{IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)},
+#endif /* CONFIG_IWLDVM */
+
+#if IS_ENABLED(CONFIG_IWLMVM)
+/* 7260 Series */
+ {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4272, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x426A, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4472, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x446A, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4570, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4560, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x5072, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x5170, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x5770, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC072, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC06A, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC472, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC570, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC560, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC370, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC360, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC02A, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)},
+
+/* 3160 Series */
+ {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0072, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0172, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x0272, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0472, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x0370, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8072, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8172, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)},
+
+/* 7265 Series */
+ {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5510, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5102, iwl7265_n_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
+
+/* 8000 Series */
+ {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
+#endif /* CONFIG_IWLMVM */
{0}
};
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
+#ifdef CONFIG_ACPI
+#define SPL_METHOD "SPLC"
+#define SPL_DOMAINTYPE_MODULE BIT(0)
+#define SPL_DOMAINTYPE_WIFI BIT(1)
+#define SPL_DOMAINTYPE_WIGIG BIT(2)
+#define SPL_DOMAINTYPE_RFEM BIT(3)
+
+static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
+{
+ union acpi_object *limits, *domain_type, *power_limit;
+
+ if (splx->type != ACPI_TYPE_PACKAGE ||
+ splx->package.count != 2 ||
+ splx->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ splx->package.elements[0].integer.value != 0) {
+ IWL_ERR(trans, "Unsupported splx structure\n");
+ return 0;
+ }
+
+ limits = &splx->package.elements[1];
+ if (limits->type != ACPI_TYPE_PACKAGE ||
+ limits->package.count < 2 ||
+ limits->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ limits->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ IWL_ERR(trans, "Invalid limits element\n");
+ return 0;
+ }
+
+ domain_type = &limits->package.elements[0];
+ power_limit = &limits->package.elements[1];
+ if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) {
+ IWL_DEBUG_INFO(trans, "WiFi power is not limited\n");
+ return 0;
+ }
+
+ return power_limit->integer.value;
+}
+
+static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
+{
+ acpi_handle pxsx_handle;
+ acpi_handle handle;
+ struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_status status;
+
+ pxsx_handle = ACPI_HANDLE(&pdev->dev);
+ if (!pxsx_handle) {
+ IWL_DEBUG_INFO(trans,
+ "Could not retrieve root port ACPI handle\n");
+ return;
+ }
+
+ /* Get the method's handle */
+ status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_INFO(trans, "SPL method not found\n");
+ return;
+ }
+
+ /* Call SPLC with no arguments */
+ status = acpi_evaluate_object(handle, NULL, NULL, &splx);
+ if (ACPI_FAILURE(status)) {
+ IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status);
+ return;
+ }
+
+ trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer);
+ IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n",
+ trans->dflt_pwr_limit);
+ kfree(splx.pointer);
+}
+
+#else /* CONFIG_ACPI */
+static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) {}
+#endif
+
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
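[Editor's note: the SPLC/SPLX parsing added above boils down to a bitmask test on the
first integer of the limits package. A standalone sketch with hypothetical values (the
unit of the returned limit is not spelled out in this code):

#include <stdio.h>
#include <stdint.h>

#define BIT(n)			(1ULL << (n))
#define SPL_DOMAINTYPE_WIFI	BIT(1)

int main(void)
{
	/* hypothetical values, as an SPLX package might return them */
	uint64_t domain_type = 0x07;	/* module | wifi | wigig */
	uint64_t power_limit = 1600;

	if (domain_type & SPL_DOMAINTYPE_WIFI)
		printf("WiFi power limited to %llu\n",
		       (unsigned long long)power_limit);
	else
		printf("WiFi power is not limited\n");
	return 0;
}
]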
@@ -270,19 +490,21 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int ret;
iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg);
- if (iwl_trans == NULL)
- return -ENOMEM;
+ if (IS_ERR(iwl_trans))
+ return PTR_ERR(iwl_trans);
pci_set_drvdata(pdev, iwl_trans);
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
trans_pcie->drv = iwl_drv_start(iwl_trans, cfg);
- if (IS_ERR_OR_NULL(trans_pcie->drv)) {
+ if (IS_ERR(trans_pcie->drv)) {
ret = PTR_ERR(trans_pcie->drv);
goto out_free_trans;
}
+ set_dflt_pwr_limit(iwl_trans, pdev);
+
/* register transport layer debugfs here */
ret = iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir);
if (ret)
@@ -294,7 +516,6 @@ out_free_drv:
iwl_drv_stop(trans_pcie->drv);
out_free_trans:
iwl_trans_pcie_free(iwl_trans);
- pci_set_drvdata(pdev, NULL);
return ret;
}
@@ -305,29 +526,25 @@ static void iwl_pci_remove(struct pci_dev *pdev)
iwl_drv_stop(trans_pcie->drv);
iwl_trans_pcie_free(trans);
-
- pci_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM_SLEEP
static int iwl_pci_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_trans *iwl_trans = pci_get_drvdata(pdev);
-
/* Before you put code here, think about WoWLAN. You cannot check here
* whether WoWLAN is enabled or not, and your code will run even if
* WoWLAN is enabled - don't kill the NIC, someone may need it in Sx.
*/
- return iwl_trans_suspend(iwl_trans);
+ return 0;
}
static int iwl_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_trans *iwl_trans = pci_get_drvdata(pdev);
+ struct iwl_trans *trans = pci_get_drvdata(pdev);
+ bool hw_rfkill;
/* Before you put code here, think about WoWLAN. You cannot check here
* whether WoWLAN is enabled or not, and your code will run even if
@@ -340,7 +557,15 @@ static int iwl_pci_resume(struct device *device)
*/
pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
- return iwl_trans_resume(iwl_trans);
+ if (!trans->op_mode)
+ return 0;
+
+ iwl_enable_rfkill_int(trans);
+
+ hw_rfkill = iwl_is_rfkill_set(trans);
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+
+ return 0;
}
static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index d91d2e8c62f..6c22b23a284 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -102,7 +102,7 @@ struct iwl_rxq {
u32 write_actual;
struct list_head rx_free;
struct list_head rx_used;
- int need_update;
+ bool need_update;
struct iwl_rb_status *rb_stts;
dma_addr_t rb_stts_dma;
spinlock_t lock;
@@ -117,30 +117,24 @@ struct iwl_dma_ptr {
/**
* iwl_queue_inc_wrap - increment queue index, wrap back to beginning
* @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
*/
-static inline int iwl_queue_inc_wrap(int index, int n_bd)
+static inline int iwl_queue_inc_wrap(int index)
{
- return ++index & (n_bd - 1);
+ return ++index & (TFD_QUEUE_SIZE_MAX - 1);
}
/**
* iwl_queue_dec_wrap - decrement queue index, wrap back to end
* @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
*/
-static inline int iwl_queue_dec_wrap(int index, int n_bd)
+static inline int iwl_queue_dec_wrap(int index)
{
- return --index & (n_bd - 1);
+ return --index & (TFD_QUEUE_SIZE_MAX - 1);
}
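
Both wrap helpers depend on TFD_QUEUE_SIZE_MAX being a power of two: masking with (size - 1) then acts as a modulo that also behaves sensibly when the decrement underflows to -1. A minimal standalone sketch of the idiom, assuming only the 256-entry size documented below (the test harness is illustrative, not driver code):

    #include <assert.h>

    #define QUEUE_SIZE 256                     /* must be a power of two */

    static int inc_wrap(int index) { return ++index & (QUEUE_SIZE - 1); }
    static int dec_wrap(int index) { return --index & (QUEUE_SIZE - 1); }

    int main(void)
    {
        assert(inc_wrap(QUEUE_SIZE - 1) == 0); /* wraps to the start */
        assert(dec_wrap(0) == QUEUE_SIZE - 1); /* -1 & 0xff == 0xff on
                                                  two's complement */
        return 0;
    }
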
struct iwl_cmd_meta {
/* only for SYNC commands, iff the reply skb is wanted */
struct iwl_host_cmd *source;
-
- DEFINE_DMA_UNMAP_ADDR(mapping);
- DEFINE_DMA_UNMAP_LEN(len);
-
u32 flags;
};
@@ -149,13 +143,13 @@ struct iwl_cmd_meta {
*
* Contains common data for Rx and Tx queues.
*
- * Note the difference between n_bd and n_window: the hardware
- * always assumes 256 descriptors, so n_bd is always 256 (unless
+ * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
+ * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
* there might be HW changes in the future). For the normal TX
* queues, n_window, which is the size of the software queue data
* is also 256; however, for the command queue, n_window is only
* 32 since we don't need so many commands pending. Since the HW
- * still uses 256 BDs for DMA though, n_bd stays 256. As a result,
+ * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. As a result,
* the software buffers (in the variables @meta, @txb in struct
* iwl_txq) only have 32 entries, while the HW buffers (@tfds in
* the same struct) have 256.
@@ -166,7 +160,6 @@ struct iwl_cmd_meta {
* data is a window overlayed over the HW queue.
*/
struct iwl_queue {
- int n_bd; /* number of BDs in this queue */
int write_ptr; /* 1-st empty entry (index) host_w*/
int read_ptr; /* last used entry (index) host_r*/
/* use for monitoring and recovering the stuck queue */
@@ -182,25 +175,46 @@ struct iwl_queue {
#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32
+/*
+ * The FH will write back to the first TB only, so we need
+ * to copy some data into the buffer regardless of whether
+ * it should be mapped or not. This indicates how big the
+ * first TB must be to include the scratch buffer. Since
+ * the scratch is 4 bytes at offset 12, it's 16 now. If we
+ * make it bigger then allocations will be bigger and copying
+ * will be slower, so that's probably not useful.
+ */
+#define IWL_HCMD_SCRATCHBUF_SIZE 16
+
struct iwl_pcie_txq_entry {
struct iwl_device_cmd *cmd;
- struct iwl_device_cmd *copy_cmd;
struct sk_buff *skb;
/* buffer to free after command completes */
const void *free_buf;
struct iwl_cmd_meta meta;
};
+struct iwl_pcie_txq_scratch_buf {
+ struct iwl_cmd_header hdr;
+ u8 buf[8];
+ __le32 scratch;
+};
+
/**
* struct iwl_txq - Tx Queue for DMA
* @q: generic Rx/Tx queue descriptor
* @tfds: transmit frame descriptors (DMA memory)
+ * @scratchbufs: start of command headers, including scratch buffers, for
+ * the writeback -- this is DMA memory and an array holding one buffer
+ * for each command on the queue
+ * @scratchbufs_dma: DMA address for the scratchbufs start
* @entries: transmit entries (driver state)
* @lock: queue lock
* @stuck_timer: timer that fires if queue gets stuck
* @trans_pcie: pointer back to transport (for timer)
* @need_update: indicates need to update read/write index
* @active: stores if queue is active
+ * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
*
* A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
* descriptors) and required locking structures.
@@ -208,22 +222,30 @@ struct iwl_pcie_txq_entry {
struct iwl_txq {
struct iwl_queue q;
struct iwl_tfd *tfds;
+ struct iwl_pcie_txq_scratch_buf *scratchbufs;
+ dma_addr_t scratchbufs_dma;
struct iwl_pcie_txq_entry *entries;
spinlock_t lock;
struct timer_list stuck_timer;
struct iwl_trans_pcie *trans_pcie;
- u8 need_update;
+ bool need_update;
u8 active;
+ bool ampdu;
};
+static inline dma_addr_t
+iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
+{
+ return txq->scratchbufs_dma +
+ sizeof(struct iwl_pcie_txq_scratch_buf) * idx;
+}
+
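
iwl_pcie_get_scratchbuf_dma() works because the scratch buffers live in one contiguous DMA allocation, so the bus address of entry idx is the base bus address plus idx times the element size. A hedged userspace model of that arithmetic (the struct below is a simplified stand-in for iwl_pcie_txq_scratch_buf, and the base address is made up):

    #include <stdint.h>
    #include <stdio.h>

    struct scratch_buf {
        uint8_t  hdr[4];     /* stands in for struct iwl_cmd_header */
        uint8_t  buf[8];
        uint32_t scratch;    /* 4 bytes at offset 12 -> 16 bytes total */
    };

    /* entry idx of a contiguous DMA array sits at base + idx * elem size */
    static uint64_t scratchbuf_dma(uint64_t base_dma, int idx)
    {
        return base_dma + (uint64_t)sizeof(struct scratch_buf) * idx;
    }

    int main(void)
    {
        /* 0x1000 + 5 * 16 == 0x1050 */
        printf("entry 5 at %#llx\n",
               (unsigned long long)scratchbuf_dma(0x1000, 5));
        return 0;
    }
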
/**
* struct iwl_trans_pcie - PCIe transport specific data
* @rxq: all the RX queue data
* @rx_replenish: work that will be called when buffers need to be allocated
* @drv - pointer to iwl_drv
* @trans: pointer to the generic transport area
- * @irq - the irq number for the device
- * @irq_requested: true when the irq has been requested
* @scd_base_addr: scheduler sram base address in SRAM
* @scd_bc_tbls: pointer to the byte count table of the scheduler
* @kw: keep warm address
@@ -231,11 +253,13 @@ struct iwl_txq {
* @hw_base: pci hardware address support
* @ucode_write_complete: indicates that the ucode has been copied.
* @ucode_write_waitq: wait queue for uCode load
- * @status - transport specific status flags
* @cmd_queue - command queue number
* @rx_buf_size_8k: 8 kB RX buffer size
+ * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
* @rx_page_order: page order for receive buffer size
* @wd_timeout: queue watchdog timeout (jiffies)
+ * @reg_lock: protect hw register access
+ * @cmd_in_flight: true when we have a host command in flight
*/
struct iwl_trans_pcie {
struct iwl_rxq rxq;
@@ -243,17 +267,16 @@ struct iwl_trans_pcie {
struct iwl_trans *trans;
struct iwl_drv *drv;
+ struct net_device napi_dev;
+ struct napi_struct napi;
+
/* INT ICT Table */
__le32 *ict_tbl;
dma_addr_t ict_tbl_dma;
int ict_index;
- u32 inta;
bool use_ict;
- bool irq_requested;
- struct tasklet_struct irq_tasklet;
struct isr_statistics isr_stats;
- unsigned int irq;
spinlock_t irq_lock;
u32 inta_mask;
u32 scd_base_addr;
@@ -272,37 +295,23 @@ struct iwl_trans_pcie {
wait_queue_head_t ucode_write_waitq;
wait_queue_head_t wait_command_queue;
- unsigned long status;
u8 cmd_queue;
u8 cmd_fifo;
u8 n_no_reclaim_cmds;
u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
bool rx_buf_size_8k;
+ bool bc_table_dword;
u32 rx_page_order;
- const char **command_names;
+ const char *const *command_names;
/* queue watchdog */
unsigned long wd_timeout;
-};
-/**
- * enum iwl_pcie_status: status of the PCIe transport
- * @STATUS_HCMD_ACTIVE: a SYNC command is being processed
- * @STATUS_DEVICE_ENABLED: APM is enabled
- * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
- * @STATUS_INT_ENABLED: interrupts are enabled
- * @STATUS_RFKILL: the HW RFkill switch is in KILL position
- * @STATUS_FW_ERROR: the fw is in error state
- */
-enum iwl_pcie_status {
- STATUS_HCMD_ACTIVE,
- STATUS_DEVICE_ENABLED,
- STATUS_TPOWER_PMI,
- STATUS_INT_ENABLED,
- STATUS_RFKILL,
- STATUS_FW_ERROR,
+ /* protect hw register access */
+ spinlock_t reg_lock;
+ bool cmd_in_flight;
};
#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
@@ -328,14 +337,14 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
-void iwl_pcie_tasklet(struct iwl_trans *trans);
+irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
/*****************************************************
* ICT - interrupt handling
******************************************************/
-irqreturn_t iwl_pcie_isr_ict(int irq, void *data);
+irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
@@ -353,16 +362,24 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int txq_id);
-void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
+void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
struct iwl_rx_cmd_buffer *rxb, int handler_status);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs);
+void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
+
+static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+{
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+ return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
/*****************************************************
* Error handling
******************************************************/
-int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf);
void iwl_pcie_dump_csr(struct iwl_trans *trans);
/*****************************************************
@@ -370,8 +387,7 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans);
******************************************************/
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
+ clear_bit(STATUS_INT_ENABLED, &trans->status);
/* disable interrupts from uCode/NIC to host */
iwl_write32(trans, CSR_INT_MASK, 0x00000000);
@@ -388,14 +404,18 @@ static inline void iwl_enable_interrupts(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
- set_bit(STATUS_INT_ENABLED, &trans_pcie->status);
+ set_bit(STATUS_INT_ENABLED, &trans->status);
+ trans_pcie->inta_mask = CSR_INI_SET_MASK;
iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
- iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
+ trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
+ iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}
static inline void iwl_wake_queue(struct iwl_trans *trans,
@@ -448,4 +468,33 @@ static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}
+static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
+ u32 reg, u32 mask, u32 value)
+{
+ u32 v;
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ WARN_ON_ONCE(value & ~mask);
+#endif
+
+ v = iwl_read32(trans, reg);
+ v &= ~mask;
+ v |= value;
+ iwl_write32(trans, reg, v);
+}
+
+static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
+ u32 reg, u32 mask)
+{
+ __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
+}
+
+static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
+ u32 reg, u32 mask)
+{
+ __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
+}
+
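
__iwl_trans_pcie_set_bits_mask() is a plain read-modify-write: every bit in mask is cleared, then value (which must be a subset of mask, as the debug-only WARN enforces) is set; the clear/set wrappers fall out as the value == 0 and value == mask cases. A minimal sketch of the same idiom over an ordinary variable rather than a device register:

    #include <assert.h>
    #include <stdint.h>

    static void set_bits_mask(uint32_t *reg, uint32_t mask, uint32_t value)
    {
        assert((value & ~mask) == 0);       /* value must lie within mask */
        *reg = (*reg & ~mask) | value;
    }

    int main(void)
    {
        uint32_t reg = 0xffff;

        set_bits_mask(&reg, 0x00ff, 0x5a);  /* replace the low byte */
        assert(reg == 0xff5a);
        set_bits_mask(&reg, 0x0f00, 0);     /* the clear_bit wrapper case */
        assert(reg == 0xf05a);
        return 0;
    }
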
+void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
+
#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index dad4c4aad91..a2698e5e062 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -81,10 +81,10 @@
* 'processed' and 'read' driver indexes as well)
* + A received packet is processed and handed to the kernel network stack,
* detached from the iwl->rxq. The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
- * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
- * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
- * were enough free buffers and RX_STALLED is set it is cleared.
+ * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
+ * rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
+ * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
+ * If there were enough free buffers and RX_STALLED is set it is cleared.
*
*
* Driver sequence:
@@ -110,16 +110,18 @@
/*
* iwl_rxq_space - Return number of free slots available in queue.
*/
-static int iwl_rxq_space(const struct iwl_rxq *q)
+static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
- int s = q->read - q->write;
- if (s <= 0)
- s += RX_QUEUE_SIZE;
- /* keep some buffer to not confuse full and empty queue */
- s -= 2;
- if (s < 0)
- s = 0;
- return s;
+ /* Make sure RX_QUEUE_SIZE is a power of 2 */
+ BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1));
+
+ /*
+ * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
+ * between empty and completely full queues.
+ * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
+ * defined for negative dividends.
+ */
+ return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1);
}
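
The rewritten iwl_rxq_space() uses the same power-of-two trick as the queue wrap helpers: (read - write - 1) & (size - 1) yields the free-slot count even when the subtraction goes negative, and keeping one slot in reserve is what lets a full ring be told apart from an empty one (the old version reserved two). A small self-contained check of the formula, with 256 mirroring RX_QUEUE_SIZE (the exhaustive loop is test scaffolding only):

    #include <assert.h>

    #define RING_SIZE 256   /* power of two, like RX_QUEUE_SIZE */

    static int ring_space(int read, int write)
    {
        return (read - write - 1) & (RING_SIZE - 1);
    }

    int main(void)
    {
        assert(ring_space(0, 0) == RING_SIZE - 1);  /* empty: all but one */
        assert(ring_space(10, 9) == 0);             /* completely full */

        /* invariant: used + free == RING_SIZE - 1 for every read/write */
        for (int r = 0; r < RING_SIZE; r++)
            for (int w = 0; w < RING_SIZE; w++)
                assert((((w - r) & (RING_SIZE - 1)) + ring_space(r, w))
                       == RING_SIZE - 1);
        return 0;
    }
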
/*
@@ -143,54 +145,52 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans)
/*
* iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
*/
-static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q)
+static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans)
{
- unsigned long flags;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
u32 reg;
- spin_lock_irqsave(&q->lock, flags);
+ lockdep_assert_held(&rxq->lock);
- if (q->need_update == 0)
- goto exit_unlock;
+ /*
+ * explicitly wake up the NIC if:
+ * 1. shadow registers aren't enabled
+ * 2. there is a chance that the NIC is asleep
+ */
+ if (!trans->cfg->base_params->shadow_reg_enable &&
+ test_bit(STATUS_TPOWER_PMI, &trans->status)) {
+ reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
+
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
+ reg);
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ rxq->need_update = true;
+ return;
+ }
+ }
- if (trans->cfg->base_params->shadow_reg_enable) {
- /* shadow register enabled */
- /* Device expects a multiple of 8 */
- q->write_actual = (q->write & ~0x7);
- iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual);
- } else {
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
-
- /* If power-saving is in use, make sure device is awake */
- if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
- reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
-
- if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
- IWL_DEBUG_INFO(trans,
- "Rx queue requesting wakeup,"
- " GP1 = 0x%x\n", reg);
- iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- goto exit_unlock;
- }
+ rxq->write_actual = round_down(rxq->write, 8);
+ iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
+}
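
The device consumes the RX write pointer only in multiples of 8, hence the round_down() before writing FH_RSCSR_CHNL0_WPTR; for a power-of-two step this is the same mask operation as the old (write & ~0x7) spelling. A trivial sketch of that equivalence:

    #include <assert.h>

    /* same effect as the kernel's round_down(x, 8) for power-of-two steps */
    static unsigned int round_down_pow2(unsigned int x, unsigned int step)
    {
        return x & ~(step - 1);
    }

    int main(void)
    {
        assert(round_down_pow2(13, 8) == 8);
        assert(round_down_pow2(16, 8) == 16);           /* already aligned */
        assert(round_down_pow2(13, 8) == (13 & ~0x7));  /* the old idiom */
        return 0;
    }
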
- q->write_actual = (q->write & ~0x7);
- iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
- q->write_actual);
+static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
- /* Else device is assumed to be awake */
- } else {
- /* Device expects a multiple of 8 */
- q->write_actual = (q->write & ~0x7);
- iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
- q->write_actual);
- }
- }
- q->need_update = 0;
+ spin_lock(&rxq->lock);
+
+ if (!rxq->need_update)
+ goto exit_unlock;
+
+ iwl_pcie_rxq_inc_wr_ptr(trans);
+ rxq->need_update = false;
exit_unlock:
- spin_unlock_irqrestore(&q->lock, flags);
+ spin_unlock(&rxq->lock);
}
/*
@@ -209,20 +209,19 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rx_mem_buffer *rxb;
- unsigned long flags;
/*
* If the device isn't enabled - there's no need to try to add buffers...
* This can happen when we stop the device and still have an interrupt
- * pending. We stop the APM before we sync the interrupts / tasklets
- * because we have to (see comment there). On the other hand, since
- * the APM is stopped, we cannot access the HW (in particular not prph).
+ * pending. We stop the APM before we sync the interrupts because we
+ * have to (see comment there). On the other hand, since the APM is
+ * stopped, we cannot access the HW (in particular not prph).
* So don't try to restock if the APM has been already stopped.
*/
- if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
+ if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
return;
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
/* The overwritten rxb must be a used one */
rxb = rxq->queue[rxq->write];
@@ -239,7 +238,7 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
rxq->free_count--;
}
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
/* If the pre-allocated buffer pool is dropping low, schedule to
* refill it */
if (rxq->free_count <= RX_LOW_WATERMARK)
@@ -248,10 +247,9 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
/* If we've added more space for the firmware to place data, tell it.
* Increment device's write pointer in multiples of 8. */
if (rxq->write_actual != (rxq->write & ~0x7)) {
- spin_lock_irqsave(&rxq->lock, flags);
- rxq->need_update = 1;
- spin_unlock_irqrestore(&rxq->lock, flags);
- iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
+ spin_lock(&rxq->lock);
+ iwl_pcie_rxq_inc_wr_ptr(trans);
+ spin_unlock(&rxq->lock);
}
}
@@ -270,16 +268,15 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rx_mem_buffer *rxb;
struct page *page;
- unsigned long flags;
gfp_t gfp_mask = priority;
while (1) {
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
return;
}
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
if (rxq->free_count > RX_LOW_WATERMARK)
gfp_mask |= __GFP_NOWARN;
@@ -308,17 +305,17 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
return;
}
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
__free_pages(page, trans_pcie->rx_page_order);
return;
}
rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
list);
list_del(&rxb->list);
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
BUG_ON(rxb->page);
rxb->page = page;
@@ -329,9 +326,9 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, rxb->page_dma)) {
rxb->page = NULL;
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
list_add(&rxb->list, &rxq->rx_used);
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
__free_pages(page, trans_pcie->rx_page_order);
return;
}
@@ -340,12 +337,12 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
/* and also 256 byte aligned! */
BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
}
}
@@ -355,19 +352,16 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
struct iwl_rxq *rxq = &trans_pcie->rxq;
int i;
- /* Fill the rx_used queue with _all_ of the Rx buffers */
+ lockdep_assert_held(&rxq->lock);
+
for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
- /* In the reset function, these buffers may have been allocated
- * to an SKB, so we need to unmap and free potential storage */
- if (rxq->pool[i].page != NULL) {
- dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
- PAGE_SIZE << trans_pcie->rx_page_order,
- DMA_FROM_DEVICE);
- __free_pages(rxq->pool[i].page,
- trans_pcie->rx_page_order);
- rxq->pool[i].page = NULL;
- }
- list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+ if (!rxq->pool[i].page)
+ continue;
+ dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << trans_pcie->rx_page_order,
+ DMA_FROM_DEVICE);
+ __free_pages(rxq->pool[i].page, trans_pcie->rx_page_order);
+ rxq->pool[i].page = NULL;
}
}
@@ -379,21 +373,9 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
* Also restock the Rx queue via iwl_pcie_rxq_restock.
* This is called as a scheduled work item (except during initialization)
*/
-static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
-
- iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
-
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
- iwl_pcie_rxq_restock(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-}
-
-static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
-{
- iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
+ iwl_pcie_rxq_alloc_rbs(trans, gfp);
iwl_pcie_rxq_restock(trans);
}
@@ -403,7 +385,7 @@ static void iwl_pcie_rx_replenish_work(struct work_struct *data)
struct iwl_trans_pcie *trans_pcie =
container_of(data, struct iwl_trans_pcie, rx_replenish);
- iwl_pcie_rx_replenish(trans_pcie->trans);
+ iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
}
static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
@@ -436,7 +418,7 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
err_rb_stts:
dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
rxq->bd, rxq->bd_dma);
- memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
+ rxq->bd_dma = 0;
rxq->bd = NULL;
err_bd:
return -ENOMEM;
@@ -455,6 +437,10 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
/* Stop Rx DMA */
iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ /* reset and flush pointers */
+ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
+ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
+ iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
/* Reset driver's Rx queue write index */
iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
@@ -485,15 +471,31 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
/* Set interrupt coalescing timer to default (2048 usecs) */
iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+
+ /* W/A for interrupt coalescing bug in 7260 and 3160 */
+ if (trans->cfg->host_interrupt_operation_mode)
+ iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
+}
+
+static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
+{
+ int i;
+
+ lockdep_assert_held(&rxq->lock);
+
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+ rxq->free_count = 0;
+
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
+ list_add(&rxq->pool[i].list, &rxq->rx_used);
}
int iwl_pcie_rx_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
-
int i, err;
- unsigned long flags;
if (!rxq->bd) {
err = iwl_pcie_rx_alloc(trans);
@@ -501,14 +503,13 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
return err;
}
- spin_lock_irqsave(&rxq->lock, flags);
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
+ spin_lock(&rxq->lock);
- INIT_WORK(&trans_pcie->rx_replenish,
- iwl_pcie_rx_replenish_work);
+ INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
+ /* free all first - we might be reconfigured for a different size */
iwl_pcie_rxq_free_rbs(trans);
+ iwl_pcie_rx_init_rxb_lists(rxq);
for (i = 0; i < RX_QUEUE_SIZE; i++)
rxq->queue[i] = NULL;
@@ -517,17 +518,16 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
* not restocked the Rx queue with fresh buffers */
rxq->read = rxq->write = 0;
rxq->write_actual = 0;
- rxq->free_count = 0;
- spin_unlock_irqrestore(&rxq->lock, flags);
+ memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
+ spin_unlock(&rxq->lock);
- iwl_pcie_rx_replenish(trans);
+ iwl_pcie_rx_replenish(trans, GFP_KERNEL);
iwl_pcie_rx_hw_init(trans, rxq);
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
- rxq->need_update = 1;
- iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_lock(&rxq->lock);
+ iwl_pcie_rxq_inc_wr_ptr(trans);
+ spin_unlock(&rxq->lock);
return 0;
}
@@ -536,7 +536,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
- unsigned long flags;
/* if rxq->bd is NULL, it means that nothing has been allocated,
* exit now */
@@ -545,13 +544,15 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
return;
}
- spin_lock_irqsave(&rxq->lock, flags);
+ cancel_work_sync(&trans_pcie->rx_replenish);
+
+ spin_lock(&rxq->lock);
iwl_pcie_rxq_free_rbs(trans);
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
rxq->bd, rxq->bd_dma);
- memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
+ rxq->bd_dma = 0;
rxq->bd = NULL;
if (rxq->rb_stts)
@@ -560,7 +561,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
rxq->rb_stts, rxq->rb_stts_dma);
else
IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
- memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
+ rxq->rb_stts_dma = 0;
rxq->rb_stts = NULL;
}
@@ -570,7 +571,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
- unsigned long flags;
bool page_stolen = false;
int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
u32 offset = 0;
@@ -588,6 +588,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
int index, cmd_index, err, len;
struct iwl_rx_cmd_buffer rxcb = {
._offset = offset,
+ ._rx_page_order = trans_pcie->rx_page_order,
._page = rxb->page,
._page_stolen = false,
.truesize = max_len,
@@ -602,7 +603,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
rxcb._offset, get_cmd_string(trans_pcie, pkt->hdr.cmd),
pkt->hdr.cmd);
- len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ len = iwl_rx_packet_len(pkt);
len += sizeof(u32); /* account for status word */
trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
@@ -630,22 +631,14 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
index = SEQ_TO_INDEX(sequence);
cmd_index = get_cmd_index(&txq->q, index);
- if (reclaim) {
- struct iwl_pcie_txq_entry *ent;
- ent = &txq->entries[cmd_index];
- cmd = ent->copy_cmd;
- WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
- } else {
+ if (reclaim)
+ cmd = txq->entries[cmd_index].cmd;
+ else
cmd = NULL;
- }
err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
if (reclaim) {
- /* The original command isn't needed any more */
- kfree(txq->entries[cmd_index].copy_cmd);
- txq->entries[cmd_index].copy_cmd = NULL;
- /* nor is the duplicated part of the command */
kfree(txq->entries[cmd_index].free_buf);
txq->entries[cmd_index].free_buf = NULL;
}
@@ -679,7 +672,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
/* Reuse the page if possible. For notification packets and
* SKBs that fail to Rx correctly, add them back into the
* rx_free list for reuse later. */
- spin_lock_irqsave(&rxq->lock, flags);
if (rxb->page != NULL) {
rxb->page_dma =
dma_map_page(trans->dev, rxb->page, 0,
@@ -700,7 +692,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
}
} else
list_add_tail(&rxb->list, &rxq->rx_used);
- spin_unlock_irqrestore(&rxq->lock, flags);
}
/*
@@ -715,6 +706,8 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
u32 count = 8;
int total_empty;
+restart:
+ spin_lock(&rxq->lock);
/* uCode's read index (stored in shared DRAM) indicates the last Rx
* buffer that the driver may process (last buffer filled by ucode). */
r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
@@ -749,18 +742,25 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
count++;
if (count >= 8) {
rxq->read = i;
- iwl_pcie_rx_replenish_now(trans);
+ spin_unlock(&rxq->lock);
+ iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
count = 0;
+ goto restart;
}
}
}
/* Backtrack one entry */
rxq->read = i;
+ spin_unlock(&rxq->lock);
+
if (fill_rx)
- iwl_pcie_rx_replenish_now(trans);
+ iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
else
iwl_pcie_rxq_restock(trans);
+
+ if (trans_pcie->napi.poll)
+ napi_gro_flush(&trans_pcie->napi, false);
}
/*
@@ -776,35 +776,166 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
APMS_CLK_VAL_MRB_FUNC_MODE) ||
(iwl_read_prph(trans, APMG_PS_CTRL_REG) &
APMG_PS_CTRL_VAL_RESET_REQ))) {
- clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
iwl_op_mode_wimax_active(trans->op_mode);
wake_up(&trans_pcie->wait_command_queue);
return;
}
iwl_pcie_dump_csr(trans);
- iwl_pcie_dump_fh(trans, NULL);
+ iwl_dump_fh(trans, NULL);
+
+ local_bh_disable();
+ /* The STATUS_FW_ERROR bit is set in this function. This must happen
+ * before we wake up the command caller, to ensure a proper cleanup. */
+ iwl_trans_fw_error(trans);
+ local_bh_enable();
- set_bit(STATUS_FW_ERROR, &trans_pcie->status);
- clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
wake_up(&trans_pcie->wait_command_queue);
+}
+
+static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
+{
+ u32 inta;
+
+ lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
+
+ trace_iwlwifi_dev_irq(trans->dev);
+
+ /* Discover which interrupts are active/pending */
+ inta = iwl_read32(trans, CSR_INT);
+
+ /* the thread will service interrupts and re-enable them */
+ return inta;
+}
+
+/* a device (PCI-E) page is 4096 bytes long */
+#define ICT_SHIFT 12
+#define ICT_SIZE (1 << ICT_SHIFT)
+#define ICT_COUNT (ICT_SIZE / sizeof(u32))
+
+/* Interrupt handling using the ICT table. With this mechanism the driver
+ * stops reading the (expensive) INTA register to discover the device's
+ * interrupts; instead, the device writes interrupt causes into the ICT
+ * table in DRAM, increments its index and fires an interrupt. The driver
+ * then ORs all ICT entries from the current index up to the first entry
+ * with value 0 - the result is the interrupt cause to service - and sets
+ * the consumed entries back to 0 before updating the index.
+ */
+static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 inta;
+ u32 val = 0;
+ u32 read;
+
+ trace_iwlwifi_dev_irq(trans->dev);
+
+ /* Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC. */
+ read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+ trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
+ if (!read)
+ return 0;
+
+ /*
+ * Collect all entries up to the first 0, starting from ict_index;
+ * note we already read at ict_index.
+ */
+ do {
+ val |= read;
+ IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
+ trans_pcie->ict_index, read);
+ trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
+ trans_pcie->ict_index =
+ ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
+
+ read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+ trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
+ read);
+ } while (read);
+
+ /* We should not get this value, just ignore it. */
+ if (val == 0xffffffff)
+ val = 0;
+
+ /*
+ * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
+ * (bit 15 before shifting it to 31) to clear when using interrupt
+ * coalescing. fortunately, bits 18 and 19 stay set when this happens
+ * so we use them to decide on the real state of the Rx bit.
+ * In other words, bit 15 is set if bit 18 or bit 19 is set.
+ */
+ if (val & 0xC0000)
+ val |= 0x8000;
- iwl_op_mode_nic_error(trans->op_mode);
+ inta = (0xff & val) | ((0xff00 & val) << 16);
+ return inta;
}
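
Stripped of tracing and the hardware workaround, the ICT read path is: walk the table from ict_index, OR entries together until the first zero, clearing each as you go, then fold the two cause bytes into their CSR_INT bit positions. A host-side model of that loop (a plain array stands in for the DMA-coherent table; the 0xC0000/0x8000 Rx-bit fixup is omitted):

    #include <assert.h>
    #include <stdint.h>

    #define ICT_COUNT 1024                /* 4096-byte page / sizeof(u32) */

    static uint32_t ict_tbl[ICT_COUNT];
    static int ict_index;

    /* OR all entries from ict_index up to the first zero, clearing them */
    static uint32_t ict_collect(void)
    {
        uint32_t val = 0;
        uint32_t read = ict_tbl[ict_index];

        while (read) {
            val |= read;
            ict_tbl[ict_index] = 0;
            ict_index = (ict_index + 1) & (ICT_COUNT - 1);
            read = ict_tbl[ict_index];
        }
        /* low byte -> bits 0-7, second byte -> bits 24-31 */
        return (0xff & val) | ((0xff00 & val) << 16);
    }

    int main(void)
    {
        ict_tbl[0] = 0x0002;
        ict_tbl[1] = 0x8000;
        assert(ict_collect() == (0x2 | 0x80000000u));
        assert(ict_index == 2 && !ict_tbl[0] && !ict_tbl[1]);
        return 0;
    }
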
-void iwl_pcie_tasklet(struct iwl_trans *trans)
+irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
+ struct iwl_trans *trans = dev_id;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
u32 inta = 0;
u32 handled = 0;
- unsigned long flags;
- u32 i;
-#ifdef CONFIG_IWLWIFI_DEBUG
- u32 inta_mask;
-#endif
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ lock_map_acquire(&trans->sync_cmd_lockdep_map);
+
+ spin_lock(&trans_pcie->irq_lock);
+
+ /* if the DRAM interrupt table isn't set yet,
+ * fall back to the legacy (non-ICT) read.
+ */
+ if (likely(trans_pcie->use_ict))
+ inta = iwl_pcie_int_cause_ict(trans);
+ else
+ inta = iwl_pcie_int_cause_non_ict(trans);
+
+ if (iwl_have_debug_level(IWL_DL_ISR)) {
+ IWL_DEBUG_ISR(trans,
+ "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
+ inta, trans_pcie->inta_mask,
+ iwl_read32(trans, CSR_INT_MASK),
+ iwl_read32(trans, CSR_FH_INT_STATUS));
+ if (inta & (~trans_pcie->inta_mask))
+ IWL_DEBUG_ISR(trans,
+ "We got a masked interrupt (0x%08x)\n",
+ inta & (~trans_pcie->inta_mask));
+ }
+
+ inta &= trans_pcie->inta_mask;
+
+ /*
+ * Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC.
+ */
+ if (unlikely(!inta)) {
+ IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
+ /*
+ * Re-enable interrupts here since we don't
+ * have anything to service
+ */
+ if (test_bit(STATUS_INT_ENABLED, &trans->status))
+ iwl_enable_interrupts(trans);
+ spin_unlock(&trans_pcie->irq_lock);
+ lock_map_release(&trans->sync_cmd_lockdep_map);
+ return IRQ_NONE;
+ }
+
+ if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
+ /*
+ * Hardware disappeared. It might have
+ * already raised an interrupt.
+ */
+ IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
+ spin_unlock(&trans_pcie->irq_lock);
+ goto out;
+ }
/* Ack/clear/reset pending uCode interrupts.
* Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
@@ -817,24 +948,13 @@ void iwl_pcie_tasklet(struct iwl_trans *trans)
* hardware bugs here by ACKing all the possible interrupts so that
* interrupt coalescing can still be achieved.
*/
- iwl_write32(trans, CSR_INT,
- trans_pcie->inta | ~trans_pcie->inta_mask);
+ iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
- inta = trans_pcie->inta;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_have_debug_level(IWL_DL_ISR)) {
- /* just for debug */
- inta_mask = iwl_read32(trans, CSR_INT_MASK);
+ if (iwl_have_debug_level(IWL_DL_ISR))
IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
- inta, inta_mask);
- }
-#endif
-
- /* saved interrupt in inta variable now we can reset trans_pcie->inta */
- trans_pcie->inta = 0;
+ inta, iwl_read32(trans, CSR_INT_MASK));
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
/* Now service all interrupt bits discovered above. */
if (inta & CSR_INT_BIT_HW_ERR) {
@@ -848,15 +968,14 @@ void iwl_pcie_tasklet(struct iwl_trans *trans)
handled |= CSR_INT_BIT_HW_ERR;
- return;
+ goto out;
}
-#ifdef CONFIG_IWLWIFI_DEBUG
if (iwl_have_debug_level(IWL_DL_ISR)) {
/* NIC fires this, but we don't use it, redundant with WAKEUP */
if (inta & CSR_INT_BIT_SCD) {
- IWL_DEBUG_ISR(trans, "Scheduler finished to transmit "
- "the frame/frames.\n");
+ IWL_DEBUG_ISR(trans,
+ "Scheduler finished to transmit the frame/frames.\n");
isr_stats->sch++;
}
@@ -866,7 +985,7 @@ void iwl_pcie_tasklet(struct iwl_trans *trans)
isr_stats->alive++;
}
}
-#endif
+
/* Safely ignore these bits for debug checks below */
inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
@@ -880,16 +999,16 @@ void iwl_pcie_tasklet(struct iwl_trans *trans)
isr_stats->rfkill++;
- iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
if (hw_rfkill) {
- set_bit(STATUS_RFKILL, &trans_pcie->status);
- if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
- &trans_pcie->status))
+ set_bit(STATUS_RFKILL, &trans->status);
+ if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status))
IWL_DEBUG_RF_KILL(trans,
"Rfkill while SYNC HCMD in flight\n");
wake_up(&trans_pcie->wait_command_queue);
} else {
- clear_bit(STATUS_RFKILL, &trans_pcie->status);
+ clear_bit(STATUS_RFKILL, &trans->status);
}
handled |= CSR_INT_BIT_RF_KILL;
@@ -914,9 +1033,8 @@ void iwl_pcie_tasklet(struct iwl_trans *trans)
/* uCode wakes up after power-down sleep */
if (inta & CSR_INT_BIT_WAKEUP) {
IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
- iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
- for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
- iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);
+ iwl_pcie_rxq_check_wrptr(trans);
+ iwl_pcie_txq_check_wrptrs(trans);
isr_stats->wakeup++;
@@ -954,8 +1072,6 @@ void iwl_pcie_tasklet(struct iwl_trans *trans)
iwl_write8(trans, CSR_INT_PERIODIC_REG,
CSR_INT_PERIODIC_DIS);
- iwl_pcie_rx_handle(trans);
-
/*
* Enable periodic interrupt in 8 msec only if we received
* real RX interrupt (instead of just periodic int), to catch
@@ -968,6 +1084,10 @@ void iwl_pcie_tasklet(struct iwl_trans *trans)
CSR_INT_PERIODIC_ENA);
isr_stats->rx++;
+
+ local_bh_disable();
+ iwl_pcie_rx_handle(trans);
+ local_bh_enable();
}
/* This "Tx" DMA channel is used only for loading uCode */
@@ -993,11 +1113,15 @@ void iwl_pcie_tasklet(struct iwl_trans *trans)
/* Re-enable all interrupts */
/* only Re-enable if disabled by irq */
- if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
+ if (test_bit(STATUS_INT_ENABLED, &trans->status))
iwl_enable_interrupts(trans);
/* Re-enable RF_KILL if it occurred */
else if (handled & CSR_INT_BIT_RF_KILL)
iwl_enable_rfkill_int(trans);
+
+out:
+ lock_map_release(&trans->sync_cmd_lockdep_map);
+ return IRQ_HANDLED;
}
/******************************************************************************
@@ -1006,11 +1130,6 @@ void iwl_pcie_tasklet(struct iwl_trans *trans)
*
******************************************************************************/
-/* a device (PCI-E) page is 4096 bytes long */
-#define ICT_SHIFT 12
-#define ICT_SIZE (1 << ICT_SHIFT)
-#define ICT_COUNT (ICT_SIZE / sizeof(u32))
-
/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
@@ -1035,7 +1154,7 @@ int iwl_pcie_alloc_ict(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
trans_pcie->ict_tbl =
- dma_alloc_coherent(trans->dev, ICT_SIZE,
+ dma_zalloc_coherent(trans->dev, ICT_SIZE,
&trans_pcie->ict_tbl_dma,
GFP_KERNEL);
if (!trans_pcie->ict_tbl)
@@ -1047,17 +1166,10 @@ int iwl_pcie_alloc_ict(struct iwl_trans *trans)
return -EINVAL;
}
- IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
- (unsigned long long)trans_pcie->ict_tbl_dma);
-
- IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);
-
- /* reset table and index to all 0 */
- memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
- trans_pcie->ict_index = 0;
+ IWL_DEBUG_ISR(trans, "ict dma addr %Lx ict vir addr %p\n",
+ (unsigned long long)trans_pcie->ict_tbl_dma,
+ trans_pcie->ict_tbl);
- /* add periodic RX interrupt */
- trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
return 0;
}
@@ -1068,12 +1180,11 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 val;
- unsigned long flags;
if (!trans_pcie->ict_tbl)
return;
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
@@ -1090,207 +1201,32 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
trans_pcie->ict_index = 0;
iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
iwl_enable_interrupts(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
}
/* Device is going down disable ict interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
trans_pcie->use_ict = false;
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
}
-/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
-static irqreturn_t iwl_pcie_isr(int irq, void *data)
+irqreturn_t iwl_pcie_isr(int irq, void *data)
{
struct iwl_trans *trans = data;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u32 inta, inta_mask;
-#ifdef CONFIG_IWLWIFI_DEBUG
- u32 inta_fh;
-#endif
-
- lockdep_assert_held(&trans_pcie->irq_lock);
-
- trace_iwlwifi_dev_irq(trans->dev);
-
- /* Disable (but don't clear!) interrupts here to avoid
- * back-to-back ISRs and sporadic interrupts from our NIC.
- * If we have something to service, the tasklet will re-enable ints.
- * If we *don't* have something, we'll re-enable before leaving here. */
- inta_mask = iwl_read32(trans, CSR_INT_MASK);
- iwl_write32(trans, CSR_INT_MASK, 0x00000000);
-
- /* Discover which interrupts are active/pending */
- inta = iwl_read32(trans, CSR_INT);
-
- if (inta & (~inta_mask)) {
- IWL_DEBUG_ISR(trans,
- "We got a masked interrupt (0x%08x)...Ack and ignore\n",
- inta & (~inta_mask));
- iwl_write32(trans, CSR_INT, inta & (~inta_mask));
- inta &= inta_mask;
- }
-
- /* Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC. */
- if (!inta) {
- IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
- goto none;
- }
-
- if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
- /* Hardware disappeared. It might have already raised
- * an interrupt */
- IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
- return IRQ_HANDLED;
- }
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_have_debug_level(IWL_DL_ISR)) {
- inta_fh = iwl_read32(trans, CSR_FH_INT_STATUS);
- IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x, "
- "fh 0x%08x\n", inta, inta_mask, inta_fh);
- }
-#endif
-
- trans_pcie->inta |= inta;
- /* iwl_pcie_tasklet() will service interrupts and re-enable them */
- if (likely(inta))
- tasklet_schedule(&trans_pcie->irq_tasklet);
- else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
- !trans_pcie->inta)
- iwl_enable_interrupts(trans);
-
-none:
- /* re-enable interrupts here since we don't have anything to service. */
- /* only Re-enable if disabled by irq and no schedules tasklet. */
- if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
- !trans_pcie->inta)
- iwl_enable_interrupts(trans);
-
- return IRQ_NONE;
-}
-
-/* interrupt handler using ict table, with this interrupt driver will
- * stop using INTA register to get device's interrupt, reading this register
- * is expensive, device will write interrupts in ICT dram table, increment
- * index then will fire interrupt to driver, driver will OR all ICT table
- * entries from current index up to table entry with 0 value. the result is
- * the interrupt we need to service, driver will set the entries back to 0 and
- * set index.
- */
-irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
-{
- struct iwl_trans *trans = data;
- struct iwl_trans_pcie *trans_pcie;
- u32 inta, inta_mask;
- u32 val = 0;
- u32 read;
- unsigned long flags;
if (!trans)
return IRQ_NONE;
- trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
-
- /* dram interrupt table not set yet,
- * use legacy interrupt.
- */
- if (unlikely(!trans_pcie->use_ict)) {
- irqreturn_t ret = iwl_pcie_isr(irq, data);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- return ret;
- }
-
- trace_iwlwifi_dev_irq(trans->dev);
-
/* Disable (but don't clear!) interrupts here to avoid
* back-to-back ISRs and sporadic interrupts from our NIC.
* If we have something to service, the tasklet will re-enable ints.
* If we *don't* have something, we'll re-enable before leaving here.
*/
- inta_mask = iwl_read32(trans, CSR_INT_MASK);
iwl_write32(trans, CSR_INT_MASK, 0x00000000);
- /* Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC. */
- read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
- trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
- if (!read) {
- IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
- goto none;
- }
-
- /*
- * Collect all entries up to the first 0, starting from ict_index;
- * note we already read at ict_index.
- */
- do {
- val |= read;
- IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
- trans_pcie->ict_index, read);
- trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
- trans_pcie->ict_index =
- iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);
-
- read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
- trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
- read);
- } while (read);
-
- /* We should not get this value, just ignore it. */
- if (val == 0xffffffff)
- val = 0;
-
- /*
- * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
- * (bit 15 before shifting it to 31) to clear when using interrupt
- * coalescing. fortunately, bits 18 and 19 stay set when this happens
- * so we use them to decide on the real state of the Rx bit.
- * In order words, bit 15 is set if bit 18 or bit 19 are set.
- */
- if (val & 0xC0000)
- val |= 0x8000;
-
- inta = (0xff & val) | ((0xff00 & val) << 16);
- IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
- inta, inta_mask, val);
-
- inta &= trans_pcie->inta_mask;
- trans_pcie->inta |= inta;
-
- /* iwl_pcie_tasklet() will service interrupts and re-enable them */
- if (likely(inta))
- tasklet_schedule(&trans_pcie->irq_tasklet);
- else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
- !trans_pcie->inta) {
- /* Allow interrupt if was disabled by this handler and
- * no tasklet was schedules, We should not enable interrupt,
- * tasklet will enable it.
- */
- iwl_enable_interrupts(trans);
- }
-
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- return IRQ_HANDLED;
-
- none:
- /* re-enable interrupts here since we don't have anything to service.
- * only Re-enable if disabled by irq.
- */
- if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
- !trans_pcie->inta)
- iwl_enable_interrupts(trans);
-
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- return IRQ_NONE;
+ return IRQ_WAKE_THREAD;
}
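
This is the core of the patch: the hard handler above only masks the device and returns IRQ_WAKE_THREAD, while the work that used to run in the tasklet now runs in the threaded handler iwl_pcie_irq_handler, in process context. A hedged sketch of how such a pair is typically wired up with request_threaded_irq() (the demo_* names are placeholders, not this driver's probe code, which lives in trans.c):

    #include <linux/interrupt.h>
    #include <linux/io.h>

    struct demo_dev { void __iomem *regs; };   /* hypothetical device */
    #define DEMO_INT_MASK 0x08                 /* hypothetical register */

    static irqreturn_t demo_hard_isr(int irq, void *dev_id)
    {
        struct demo_dev *dev = dev_id;

        /* mask (but don't clear) device interrupts, then punt to thread */
        writel(0, dev->regs + DEMO_INT_MASK);
        return IRQ_WAKE_THREAD;
    }

    static irqreturn_t demo_thread_fn(int irq, void *dev_id)
    {
        struct demo_dev *dev = dev_id;

        /* process context: free to sleep, take mutexes, allocate, etc.
         * service the causes here, then re-enable interrupts */
        (void)dev;
        return IRQ_HANDLED;
    }

    /* at probe time:
     *   err = request_threaded_irq(irq, demo_hard_isr, demo_thread_fn,
     *                              IRQF_SHARED, "demo", dev);
     */
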
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 35708b959ad..788085bc65d 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -73,23 +73,33 @@
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-agn-hw.h"
+#include "iwl-fw-error-dump.h"
#include "internal.h"
-static void iwl_pcie_set_pwr_vmain(struct iwl_trans *trans)
+static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
-/*
- * (for documentation purposes)
- * to set power to V_AUX, do:
+ iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
+ ((reg & 0x0000ffff) | (2 << 28)));
+ return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
+}
- if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
- iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
- */
+static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
+{
+ iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
+ iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
+ ((reg & 0x0000ffff) | (3 << 28)));
+}
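
These two helpers implement an indirect register window: the shared-memory address goes into a control register along with an opcode in the top nibble (2 = read, 3 = write), and the payload moves through a companion data register. A hedged userspace model of the pattern (plain variables stand in for the CSR pair, and the model keeps only a 256-entry register file):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t shr[256];   /* fake shared registers behind the window */
    static uint32_t data_reg;

    /* writing the control register triggers the op in the top nibble */
    static void ctrl_write(uint32_t val)
    {
        uint32_t addr = val & 0xff;    /* real hw uses a 16-bit address */
        uint32_t op = val >> 28;

        if (op == 2)                   /* read: latch data for the caller */
            data_reg = shr[addr];
        else if (op == 3)              /* write: commit previously set data */
            shr[addr] = data_reg;
    }

    static uint32_t read_shr(uint32_t reg)
    {
        ctrl_write((reg & 0xffff) | (2u << 28));
        return data_reg;
    }

    static void write_shr(uint32_t reg, uint32_t val)
    {
        data_reg = val;                /* data first, then control + opcode */
        ctrl_write((reg & 0xffff) | (3u << 28));
    }

    int main(void)
    {
        write_shr(0x42, 0xdeadbeef);
        assert(read_shr(0x42) == 0xdeadbeef);
        return 0;
    }
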
- iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
+static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
+{
+ if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
+ iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+ else
+ iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
}
/* PCI registers */
@@ -128,7 +138,6 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans)
*/
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret = 0;
IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
@@ -138,8 +147,9 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
*/
/* Disable L0S exit timer (platform NMI Work/Around) */
- iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
- CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
/*
* Disable L0s without affecting L1;
@@ -184,26 +194,165 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
goto out;
}
+ if (trans->cfg->host_interrupt_operation_mode) {
+ /*
+ * This is a bit of an abuse - this is needed only for 7260 / 3160,
+ * so we check host_interrupt_operation_mode even though what
+ * follows is not related to host_interrupt_operation_mode.
+ *
+ * Enable the oscillator to count wake up time for L1 exit. This
+ * consumes slightly more power (100uA) - but allows to be sure
+ * that we wake up from L1 on time.
+ *
+ * This looks weird: read twice the same register, discard the
+ * value, set a bit, and yet again, read that same register
+ * just to discard the value. But that's the way the hardware
+ * seems to like it.
+ */
+ iwl_read_prph(trans, OSC_CLK);
+ iwl_read_prph(trans, OSC_CLK);
+ iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
+ iwl_read_prph(trans, OSC_CLK);
+ iwl_read_prph(trans, OSC_CLK);
+ }
+
/*
* Enable DMA clock and wait for it to stabilize.
*
- * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
- * do not disable clocks. This preserves any hardware bits already
- * set by default in "CLK_CTRL_REG" after reset.
+ * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
+ * bits do not disable clocks. This preserves any hardware
+ * bits already set by default in "CLK_CTRL_REG" after reset.
*/
- iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
- udelay(20);
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ iwl_write_prph(trans, APMG_CLK_EN_REG,
+ APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(20);
+
+ /* Disable L1-Active */
+ iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
- /* Disable L1-Active */
- iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
- APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+ /* Clear the interrupt in APMG if the NIC is in RFKILL */
+ iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
+ APMG_RTC_INT_STT_RFKILL);
+ }
- set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
+ set_bit(STATUS_DEVICE_ENABLED, &trans->status);
out:
return ret;
}
+/*
+ * Enable LP XTAL to avoid HW bug where device may consume much power if
+ * FW is not loaded after device reset. LP XTAL is disabled by default
+ * after device HW reset. Do it only if XTAL is fed by internal source.
+ * Configure device's "persistence" mode to avoid resetting XTAL again when
+ * SHRD_HW_RST occurs in S3.
+ */
+static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
+{
+ int ret;
+ u32 apmg_gp1_reg;
+ u32 apmg_xtal_cfg_reg;
+ u32 dl_cfg_reg;
+
+ /* Force XTAL ON */
+ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+
+ /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+ udelay(10);
+
+ /*
+ * Set "initialization complete" bit to move adapter from
+ * D0U* --> D0A* (powered-up active) state.
+ */
+ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /*
+ * Wait for clock stabilization; once stabilized, access to
+ * device-internal resources is possible.
+ */
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ 25000);
+ if (WARN_ON(ret < 0)) {
+ IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
+ /* Release XTAL ON request */
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+ return;
+ }
+
+ /*
+ * Clear "disable persistence" to avoid LP XTAL resetting when
+ * SHRD_HW_RST is applied in S3.
+ */
+ iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_PERSIST_DIS);
+
+ /*
+ * Force APMG XTAL to be active to prevent its disabling by HW
+ * caused by APMG idle state.
+ */
+ apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
+ SHR_APMG_XTAL_CFG_REG);
+ iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
+ apmg_xtal_cfg_reg |
+ SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
+
+ /*
+ * Reset entire device again - do controller reset (results in
+ * SHRD_HW_RST). Turn MAC off before proceeding.
+ */
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+ udelay(10);
+
+ /* Enable LP XTAL by indirect access through CSR */
+ apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
+ iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
+ SHR_APMG_GP1_WF_XTAL_LP_EN |
+ SHR_APMG_GP1_CHICKEN_BIT_SELECT);
+
+ /* Clear delay line clock power up */
+ dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
+ iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
+ ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);
+
+ /*
+ * Enable persistence mode to avoid LP XTAL resetting when
+ * SHRD_HW_RST is applied in S3.
+ */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
+
+ /*
+ * Clear "initialization complete" bit to move adapter from
+ * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+ */
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /* Activates XTAL resources monitor */
+ __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
+ CSR_MONITOR_XTAL_RESOURCES);
+
+ /* Release XTAL ON request */
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+ udelay(10);
+
+ /* Release APMG XTAL */
+ iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
+ apmg_xtal_cfg_reg &
+ ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
+}
+
static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
int ret = 0;
@@ -224,14 +373,18 @@ static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
static void iwl_pcie_apm_stop(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
- clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
+ clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
/* Stop device's DMA activity */
iwl_pcie_apm_stop_master(trans);
+ if (trans->cfg->lp_xtal_workaround) {
+ iwl_pcie_apm_lp_xtal_enable(trans);
+ return;
+ }
+
/* Reset the entire device */
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
@@ -248,18 +401,15 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans)
static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
/* nic_init */
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_pcie_apm_init(trans);
- /* Set interrupt coalescing calibration timer to default (512 usecs) */
- iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
+ spin_unlock(&trans_pcie->irq_lock);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-
- iwl_pcie_set_pwr_vmain(trans);
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ iwl_pcie_set_pwr(trans, false);
iwl_op_mode_nic_config(trans->op_mode);
@@ -304,6 +454,7 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
int ret;
int t = 0;
+ int iter;
IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
@@ -312,18 +463,23 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
if (ret >= 0)
return 0;
- /* If HW is not ready, prepare the conditions to check again */
- iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_PREPARE);
+ for (iter = 0; iter < 10; iter++) {
+ /* If HW is not ready, prepare the conditions to check again */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_PREPARE);
+
+ do {
+ ret = iwl_pcie_set_hw_ready(trans);
+ if (ret >= 0)
+ return 0;
- do {
- ret = iwl_pcie_set_hw_ready(trans);
- if (ret >= 0)
- return 0;
+ usleep_range(200, 1000);
+ t += 200;
+ } while (t < 150000);
+ msleep(25);
+ }
- usleep_range(200, 1000);
- t += 200;
- } while (t < 150000);
+ IWL_DEBUG_INFO(trans, "got NIC after %d iterations\n", iter);
return ret;
}
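
The retry logic above nests a fine-grained poll (usleep_range() with a running time budget) inside up to ten coarser attempts that re-assert PREPARE and back off with msleep(25); note that the budget t is shared across the outer iterations, so after the first iteration exhausts it, later iterations poll only once before backing off again. A compilable userspace sketch of the same shape, with hw_ready() standing in for iwl_pcie_set_hw_ready():

	/* Sketch of the two-level poll; all names are stand-ins. */
	#include <stdbool.h>
	#include <stdio.h>
	#include <unistd.h>

	static int attempts;
	static bool hw_ready(void) { return ++attempts > 7; } /* fake: ready on 8th poll */

	static int prepare_card(void)
	{
		int t = 0;			/* budget shared across tries, as in the patch */

		for (int iter = 0; iter < 10; iter++) {
			/* the real driver re-asserts the PREPARE bit here */
			do {
				if (hw_ready())
					return 0;
				usleep(200);	/* fine-grained inner poll */
				t += 200;
			} while (t < 150000);
			usleep(25000);		/* coarser back-off between tries */
		}
		return -1;
	}

	int main(void)
	{
		printf("prepare_card() = %d after %d polls\n", prepare_card(), attempts);
		return 0;
	}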
@@ -383,20 +539,27 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
{
u8 *v_addr;
dma_addr_t p_addr;
- u32 offset;
+ u32 offset, chunk_sz = section->len;
int ret = 0;
IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
section_num);
- v_addr = dma_alloc_coherent(trans->dev, PAGE_SIZE, &p_addr, GFP_KERNEL);
- if (!v_addr)
- return -ENOMEM;
+ v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!v_addr) {
+ IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
+ chunk_sz = PAGE_SIZE;
+ v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
+ &p_addr, GFP_KERNEL);
+ if (!v_addr)
+ return -ENOMEM;
+ }
- for (offset = 0; offset < section->len; offset += PAGE_SIZE) {
+ for (offset = 0; offset < section->len; offset += chunk_sz) {
u32 copy_size;
- copy_size = min_t(u32, PAGE_SIZE, section->len - offset);
+ copy_size = min_t(u32, chunk_sz, section->len - offset);
memcpy(v_addr, (u8 *)section->data + offset, copy_size);
ret = iwl_pcie_load_firmware_chunk(trans,
@@ -410,34 +573,192 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
}
}
- dma_free_coherent(trans->dev, PAGE_SIZE, v_addr, p_addr);
+ dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
return ret;
}
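
The allocation strategy above tries a single coherent buffer the size of the whole section (suppressing the failure warning with __GFP_NOWARN) and falls back to PAGE_SIZE chunks when memory is too fragmented. A hedged userspace sketch of the same fallback-and-chunking flow, with malloc() standing in for dma_alloc_coherent():

	/* Sketch only: the chunking math matches iwl_pcie_load_section(),
	 * but the allocator and the chunk upload are stand-ins. */
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define PAGE_SIZE 4096u

	static int load_section(const uint8_t *data, uint32_t len)
	{
		uint32_t chunk_sz = len;		/* optimistic: one bounce buffer */
		uint8_t *bounce = malloc(chunk_sz);

		if (!bounce) {
			chunk_sz = PAGE_SIZE;		/* fall back to small chunks */
			bounce = malloc(chunk_sz);
			if (!bounce)
				return -1;
		}

		for (uint32_t off = 0; off < len; off += chunk_sz) {
			uint32_t copy = len - off < chunk_sz ? len - off : chunk_sz;

			memcpy(bounce, data + off, copy);
			/* real driver: iwl_pcie_load_firmware_chunk(bounce, copy) */
		}
		free(bounce);
		return 0;
	}

	int main(void)
	{
		uint8_t img[10000] = { 0 };
		printf("load_section() = %d\n", load_section(img, sizeof(img)));
		return 0;
	}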
-static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
- const struct fw_img *image)
+static int iwl_pcie_load_cpu_secured_sections(struct iwl_trans *trans,
+ const struct fw_img *image,
+ int cpu,
+ int *first_ucode_section)
{
+ int shift_param;
int i, ret = 0;
+ u32 last_read_idx = 0;
- for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
- if (!image->sec[i].data)
+ if (cpu == 1) {
+ shift_param = 0;
+ *first_ucode_section = 0;
+ } else {
+ shift_param = 16;
+ (*first_ucode_section)++;
+ }
+
+ for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+ last_read_idx = i;
+
+ if (!image->sec[i].data ||
+ image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+ IWL_DEBUG_FW(trans,
+ "Break since Data not valid or Empty section, sec = %d\n",
+ i);
break;
+ }
+
+ if (i == (*first_ucode_section) + 1)
+ /* set CPU to started */
+ iwl_set_bits_prph(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ LMPM_CPU_HDRS_LOADING_COMPLETED
+ << shift_param);
ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
if (ret)
return ret;
}
+ /* image loading complete */
+ iwl_set_bits_prph(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ LMPM_CPU_UCODE_LOADING_COMPLETED << shift_param);
- /* Remove all resets to allow NIC to operate */
- iwl_write32(trans, CSR_RESET, 0);
+ *first_ucode_section = last_read_idx;
+
+ return 0;
+}
+
+static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
+ const struct fw_img *image,
+ int cpu,
+ int *first_ucode_section)
+{
+ int shift_param;
+ int i, ret = 0;
+ u32 last_read_idx = 0;
+
+ if (cpu == 1) {
+ shift_param = 0;
+ *first_ucode_section = 0;
+ } else {
+ shift_param = 16;
+ (*first_ucode_section)++;
+ }
+
+ for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+ last_read_idx = i;
+
+ if (!image->sec[i].data ||
+ image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+ IWL_DEBUG_FW(trans,
+ "Break since Data not valid or Empty section, sec = %d\n",
+ i);
+ break;
+ }
+
+ ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+ if (ret)
+ return ret;
+ }
+
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ iwl_set_bits_prph(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ (LMPM_CPU_UCODE_LOADING_COMPLETED |
+ LMPM_CPU_HDRS_LOADING_COMPLETED |
+ LMPM_CPU_UCODE_LOADING_STARTED) <<
+ shift_param);
+
+ *first_ucode_section = last_read_idx;
+
+ return 0;
+}
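
Both loaders use the same shift_param convention: the per-CPU load-status flags share one 32-bit register, CPU1 in bits 0-15 and CPU2 in bits 16-31. A small sketch of that packing (the flag values are illustrative, not the real LMPM_* constants):

	/* Sketch of the shift_param split between the two uCode CPUs. */
	#include <stdint.h>
	#include <stdio.h>

	#define HDRS_LOADING_COMPLETED	0x0001u	/* illustrative values */
	#define UCODE_LOADING_COMPLETED	0x0002u

	static uint32_t load_status;		/* stands in for the HW register */

	static void mark_cpu_done(int cpu, uint32_t flags)
	{
		int shift_param = (cpu == 1) ? 0 : 16;	/* CPU1 low, CPU2 high */

		load_status |= flags << shift_param;
	}

	int main(void)
	{
		mark_cpu_done(1, HDRS_LOADING_COMPLETED | UCODE_LOADING_COMPLETED);
		mark_cpu_done(2, UCODE_LOADING_COMPLETED);
		printf("load status = 0x%08x\n", load_status);	/* 0x00020003 */
		return 0;
	}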
+
+static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
+ const struct fw_img *image)
+{
+ int ret = 0;
+ int first_ucode_section;
+
+ IWL_DEBUG_FW(trans,
+ "working with %s image\n",
+ image->is_secure ? "Secured" : "Non Secured");
+ IWL_DEBUG_FW(trans,
+ "working with %s CPU\n",
+ image->is_dual_cpus ? "Dual" : "Single");
+
+ /* configure the ucode to be ready to get the secured image */
+ if (image->is_secure) {
+ /* set secure boot inspector addresses */
+ iwl_write_prph(trans,
+ LMPM_SECURE_INSPECTOR_CODE_ADDR,
+ LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE);
+
+ iwl_write_prph(trans,
+ LMPM_SECURE_INSPECTOR_DATA_ADDR,
+ LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE);
+
+ /* set CPU1 header address */
+ iwl_write_prph(trans,
+ LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR,
+ LMPM_SECURE_CPU1_HDR_MEM_SPACE);
+
+ /* load to FW the binary Secured sections of CPU1 */
+ ret = iwl_pcie_load_cpu_secured_sections(trans, image, 1,
+ &first_ucode_section);
+ if (ret)
+ return ret;
+
+ } else {
+ /* load to FW the binary Non secured sections of CPU1 */
+ ret = iwl_pcie_load_cpu_sections(trans, image, 1,
+ &first_ucode_section);
+ if (ret)
+ return ret;
+ }
+
+ if (image->is_dual_cpus) {
+ /* set CPU2 header address */
+ iwl_write_prph(trans,
+ LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
+ LMPM_SECURE_CPU2_HDR_MEM_SPACE);
+
+ /* load to FW the binary sections of CPU2 */
+ if (image->is_secure)
+ ret = iwl_pcie_load_cpu_secured_sections(
+ trans, image, 2,
+ &first_ucode_section);
+ else
+ ret = iwl_pcie_load_cpu_sections(trans, image, 2,
+ &first_ucode_section);
+ if (ret)
+ return ret;
+ }
+
+ /* release CPU reset */
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
+ else
+ iwl_write32(trans, CSR_RESET, 0);
+
+ if (image->is_secure) {
+ /* wait for image verification to complete */
+ ret = iwl_poll_prph_bit(trans,
+ LMPM_SECURE_BOOT_CPU1_STATUS_ADDR,
+ LMPM_SECURE_BOOT_STATUS_SUCCESS,
+ LMPM_SECURE_BOOT_STATUS_SUCCESS,
+ LMPM_SECURE_TIME_OUT);
+
+ if (ret < 0) {
+ IWL_ERR(trans, "Time out on secure boot process\n");
+ return ret;
+ }
+ }
return 0;
}
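
The secure-boot wait at the end is a plain bit-poll: read the status register until the masked value matches the expected one or the time budget runs out, returning a negative value on timeout as iwl_poll_prph_bit() does. A minimal sketch of that helper shape, with read_status() as a stand-in for the PRPH read:

	/* Sketch of a poll-bit helper; names and values are illustrative. */
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	#define BOOT_STATUS_SUCCESS 0x1u	/* illustrative */

	static int polls;
	static uint32_t read_status(void) { return ++polls > 3 ? BOOT_STATUS_SUCCESS : 0; }

	static int poll_bit(uint32_t mask, uint32_t expected, int timeout_us)
	{
		int waited = 0;

		do {
			if ((read_status() & mask) == expected)
				return waited;	/* success: time spent waiting */
			usleep(10);
			waited += 10;
		} while (waited < timeout_us);

		return -1;			/* timed out */
	}

	int main(void)
	{
		int ret = poll_bit(BOOT_STATUS_SUCCESS, BOOT_STATUS_SUCCESS, 100);
		printf("secure boot poll: %s (%d us)\n", ret < 0 ? "timeout" : "ok", ret);
		return 0;
	}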
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
- const struct fw_img *fw)
+ const struct fw_img *fw, bool run_in_rfkill)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
bool hw_rfkill;
@@ -447,14 +768,16 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
return -EIO;
}
- clear_bit(STATUS_FW_ERROR, &trans_pcie->status);
-
iwl_enable_rfkill_int(trans);
/* If platform's RF_KILL switch is NOT set to KILL */
hw_rfkill = iwl_is_rfkill_set(trans);
- iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
if (hw_rfkill)
+ set_bit(STATUS_RFKILL, &trans->status);
+ else
+ clear_bit(STATUS_RFKILL, &trans->status);
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+ if (hw_rfkill && !run_in_rfkill)
return -ERFKILL;
iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
@@ -491,12 +814,14 @@ static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
+ bool hw_rfkill, was_hw_rfkill;
+
+ was_hw_rfkill = iwl_is_rfkill_set(trans);
/* tell the device to stop sending interrupts */
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
/* device going down, Stop using ICT table */
iwl_pcie_disable_ict(trans);
@@ -508,7 +833,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
* restart. So don't process again if the device is
* already dead.
*/
- if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
+ if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
iwl_pcie_tx_stop(trans);
iwl_pcie_rx_stop(trans);
@@ -528,125 +853,180 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
/* Upon stop, the APM issues an interrupt if HW RF kill is set.
* Clean again the interrupt here
*/
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-
- iwl_enable_rfkill_int(trans);
-
- /* wait to make sure we flush pending tasklet*/
- synchronize_irq(trans_pcie->irq);
- tasklet_kill(&trans_pcie->irq_tasklet);
-
- cancel_work_sync(&trans_pcie->rx_replenish);
+ spin_unlock(&trans_pcie->irq_lock);
/* stop and reset the on-board processor */
iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
/* clear all status bits */
- clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
- clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
- clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
- clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
- clear_bit(STATUS_RFKILL, &trans_pcie->status);
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ clear_bit(STATUS_INT_ENABLED, &trans->status);
+ clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
+ clear_bit(STATUS_TPOWER_PMI, &trans->status);
+ clear_bit(STATUS_RFKILL, &trans->status);
+
+ /*
+ * Even if we stop the HW, we still want the RF kill
+ * interrupt
+ */
+ iwl_enable_rfkill_int(trans);
+
+ /*
+ * Check again since the RF kill state may have changed while
+ * all the interrupts were disabled, in this case we couldn't
+ * receive the RF kill interrupt and update the state in the
+ * op_mode.
+	 * Don't call the op_mode if the rfkill state hasn't changed.
+ * This allows the op_mode to call stop_device from the rfkill
+ * notification without endless recursion. Under very rare
+ * circumstances, we might have a small recursion if the rfkill
+ * state changed exactly now while we were called from stop_device.
+ * This is very unlikely but can happen and is supported.
+ */
+ hw_rfkill = iwl_is_rfkill_set(trans);
+ if (hw_rfkill)
+ set_bit(STATUS_RFKILL, &trans->status);
+ else
+ clear_bit(STATUS_RFKILL, &trans->status);
+ if (hw_rfkill != was_hw_rfkill)
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
}
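
The was_hw_rfkill/hw_rfkill dance makes the notification edge-triggered: the op_mode is called back only when the switch state actually changed while interrupts were off, which is what bounds the stop_device() recursion the comment describes. A sketch of that pattern, with illustrative names:

	/* Sketch of edge-triggered rfkill notification; names are stand-ins. */
	#include <stdbool.h>
	#include <stdio.h>

	static bool rfkill_switch;		/* fake HW switch */

	static void stop_device(void);

	static void op_mode_rf_kill(bool state)
	{
		printf("op_mode notified: rfkill %s\n", state ? "on" : "off");
		if (state)
			stop_device();		/* op_mode may stop us back */
	}

	static void stop_device(void)
	{
		bool was = rfkill_switch;	/* sample on entry */
		/* ... quiesce interrupts, DMA, reset ... */
		bool now = rfkill_switch;	/* re-sample after quiescing */

		if (now != was)			/* edge only: recursion is bounded */
			op_mode_rf_kill(now);
	}

	int main(void)
	{
		stop_device();			/* no edge -> no callback */
		return 0;
	}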
-static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
+void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
{
- /* let the ucode operate on its own */
- iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
- CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+ if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
+ iwl_trans_pcie_stop_device(trans);
+}
+static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
+{
iwl_disable_interrupts(trans);
+
+ /*
+ * in testing mode, the host stays awake and the
+ * hardware won't be reset (not even partially)
+ */
+ if (test)
+ return;
+
+ iwl_pcie_disable_ict(trans);
+
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /*
+ * reset TX queues -- some of their registers reset during S3
+ * so if we don't reset everything here the D3 image would try
+ * to execute some invalid memory upon resume
+ */
+ iwl_trans_pcie_tx_reset(trans);
+
+ iwl_pcie_set_pwr(trans, true);
}
-static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
+static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
+ enum iwl_d3_status *status,
+ bool test)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int err;
- bool hw_rfkill;
+ u32 val;
+ int ret;
- trans_pcie->inta_mask = CSR_INI_SET_MASK;
+ if (test) {
+ iwl_enable_interrupts(trans);
+ *status = IWL_D3_STATUS_ALIVE;
+ return 0;
+ }
- if (!trans_pcie->irq_requested) {
- tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
- iwl_pcie_tasklet, (unsigned long)trans);
+ iwl_pcie_set_pwr(trans, false);
- iwl_pcie_alloc_ict(trans);
+ val = iwl_read32(trans, CSR_RESET);
+ if (val & CSR_RESET_REG_FLAG_NEVO_RESET) {
+ *status = IWL_D3_STATUS_RESET;
+ return 0;
+ }
- err = request_irq(trans_pcie->irq, iwl_pcie_isr_ict,
- IRQF_SHARED, DRV_NAME, trans);
- if (err) {
- IWL_ERR(trans, "Error allocating IRQ %d\n",
- trans_pcie->irq);
- goto error;
- }
+ /*
+	 * This also enables interrupts - but none will fire yet, since
+	 * the device doesn't know we're waking it up until the opmode
+	 * actually tells it after this call.
+ */
+ iwl_pcie_reset_ict(trans);
- trans_pcie->irq_requested = true;
+ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ 25000);
+ if (ret) {
+ IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
+ return ret;
+ }
+
+ iwl_trans_pcie_tx_reset(trans);
+
+ ret = iwl_pcie_rx_init(trans);
+ if (ret) {
+ IWL_ERR(trans, "Failed to resume the device (RX reset)\n");
+ return ret;
}
+ *status = IWL_D3_STATUS_ALIVE;
+ return 0;
+}
+
+static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
+{
+ bool hw_rfkill;
+ int err;
+
err = iwl_pcie_prepare_card_hw(trans);
if (err) {
IWL_ERR(trans, "Error while preparing HW: %d\n", err);
- goto err_free_irq;
+ return err;
}
+ /* Reset the entire device */
+ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+ usleep_range(10, 15);
+
iwl_pcie_apm_init(trans);
/* From now on, the op_mode will be kept updated about RF kill state */
iwl_enable_rfkill_int(trans);
hw_rfkill = iwl_is_rfkill_set(trans);
- iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
-
- return err;
+ if (hw_rfkill)
+ set_bit(STATUS_RFKILL, &trans->status);
+ else
+ clear_bit(STATUS_RFKILL, &trans->status);
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
-err_free_irq:
- trans_pcie->irq_requested = false;
- free_irq(trans_pcie->irq, trans);
-error:
- iwl_pcie_free_ict(trans);
- tasklet_kill(&trans_pcie->irq_tasklet);
- return err;
+ return 0;
}
-static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
- bool op_mode_leaving)
+static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- bool hw_rfkill;
- unsigned long flags;
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ /* disable interrupts - don't enable HW RF kill interrupt */
+ spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
iwl_pcie_apm_stop(trans);
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
iwl_pcie_disable_ict(trans);
-
- if (!op_mode_leaving) {
- /*
- * Even if we stop the HW, we still want the RF kill
- * interrupt
- */
- iwl_enable_rfkill_int(trans);
-
- /*
- * Check again since the RF kill state may have changed while
- * all the interrupts were disabled, in this case we couldn't
- * receive the RF kill interrupt and update the state in the
- * op_mode.
- */
- hw_rfkill = iwl_is_rfkill_set(trans);
- iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
- }
}
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
@@ -666,7 +1046,8 @@ static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
- iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
+ iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
+ ((reg & 0x000FFFFF) | (3 << 24)));
return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}
@@ -674,10 +1055,16 @@ static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
u32 val)
{
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
- ((addr & 0x0000FFFF) | (3 << 24)));
+ ((addr & 0x000FFFFF) | (3 << 24)));
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
+static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
+{
+ WARN_ON(1);
+ return 0;
+}
+
static void iwl_trans_pcie_configure(struct iwl_trans *trans,
const struct iwl_trans_config *trans_cfg)
{
@@ -703,19 +1090,32 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);
trans_pcie->command_names = trans_cfg->command_names;
+ trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
+
+ /* Initialize NAPI here - it should be before registering to mac80211
+ * in the opmode but after the HW struct is allocated.
+ * As this function may be called again in some corner cases don't
+ * do anything if NAPI was already initialized.
+ */
+ if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
+ init_dummy_netdev(&trans_pcie->napi_dev);
+ iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
+ &trans_pcie->napi_dev,
+ iwl_pcie_dummy_napi_poll, 64);
+ }
}
void iwl_trans_pcie_free(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ synchronize_irq(trans_pcie->pci_dev->irq);
+
iwl_pcie_tx_free(trans);
iwl_pcie_rx_free(trans);
- if (trans_pcie->irq_requested == true) {
- free_irq(trans_pcie->irq, trans);
- iwl_pcie_free_ict(trans);
- }
+ free_irq(trans_pcie->pci_dev->irq, trans);
+ iwl_pcie_free_ict(trans);
pci_disable_msi(trans_pcie->pci_dev);
iounmap(trans_pcie->hw_base);
@@ -723,134 +1123,244 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
pci_disable_device(trans_pcie->pci_dev);
kmem_cache_destroy(trans->dev_cmd_pool);
+ if (trans_pcie->napi.poll)
+ netif_napi_del(&trans_pcie->napi);
+
kfree(trans);
}
static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
if (state)
- set_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
+ set_bit(STATUS_TPOWER_PMI, &trans->status);
else
- clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
+ clear_bit(STATUS_TPOWER_PMI, &trans->status);
}
-#ifdef CONFIG_PM_SLEEP
-static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
+static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
+ unsigned long *flags)
{
- return 0;
+ int ret;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
+
+ if (trans_pcie->cmd_in_flight)
+ goto out;
+
+ /* this bit wakes up the NIC */
+ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ /*
+ * These bits say the device is running, and should keep running for
+ * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
+ * but they do not indicate that embedded SRAM is restored yet;
+ * 3945 and 4965 have volatile SRAM, and must save/restore contents
+ * to/from host DRAM when sleeping/waking for power-saving.
+ * Each direction takes approximately 1/4 millisecond; with this
+ * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
+ * series of register accesses are expected (e.g. reading Event Log),
+ * to keep device from sleeping.
+ *
+ * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
+ * SRAM is okay/restored. We don't check that here because this call
+ * is just for hardware register access; but GP1 MAC_SLEEP check is a
+ * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
+ *
+ * 5000 series and later (including 1000 series) have non-volatile SRAM,
+ * and do not save/restore SRAM when power cycling.
+ */
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+ CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
+ if (unlikely(ret < 0)) {
+ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
+ if (!silent) {
+ u32 val = iwl_read32(trans, CSR_GP_CNTRL);
+ WARN_ONCE(1,
+ "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
+ val);
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
+ return false;
+ }
+ }
+
+out:
+ /*
+	 * Fool sparse by faking that we release the lock - sparse will
+ * track nic_access anyway.
+ */
+ __release(&trans_pcie->reg_lock);
+ return true;
}
-static int iwl_trans_pcie_resume(struct iwl_trans *trans)
+static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
+ unsigned long *flags)
{
- bool hw_rfkill;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- iwl_enable_rfkill_int(trans);
+ lockdep_assert_held(&trans_pcie->reg_lock);
- hw_rfkill = iwl_is_rfkill_set(trans);
- iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+ /*
+	 * Fool sparse by faking that we acquire the lock - sparse will
+ * track nic_access anyway.
+ */
+ __acquire(&trans_pcie->reg_lock);
- if (!hw_rfkill)
- iwl_enable_interrupts(trans);
+ if (trans_pcie->cmd_in_flight)
+ goto out;
- return 0;
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ /*
+ * Above we read the CSR_GP_CNTRL register, which will flush
+ * any previous writes, but we need the write that clears the
+ * MAC_ACCESS_REQ bit to be performed before any other writes
+ * scheduled on different CPUs (after we drop reg_lock).
+ */
+ mmiowb();
+out:
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
+}
+
+static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords)
+{
+ unsigned long flags;
+ int offs, ret = 0;
+ u32 *vals = buf;
+
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
+ iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
+ for (offs = 0; offs < dwords; offs++)
+ vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
+ iwl_trans_release_nic_access(trans, &flags);
+ } else {
+ ret = -EBUSY;
+ }
+ return ret;
+}
+
+static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
+ const void *buf, int dwords)
+{
+ unsigned long flags;
+ int offs, ret = 0;
+ const u32 *vals = buf;
+
+ if (iwl_trans_grab_nic_access(trans, false, &flags)) {
+ iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
+ for (offs = 0; offs < dwords; offs++)
+ iwl_write32(trans, HBUS_TARG_MEM_WDAT,
+ vals ? vals[offs] : 0);
+ iwl_trans_release_nic_access(trans, &flags);
+ } else {
+ ret = -EBUSY;
+ }
+ return ret;
}
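
Both memory accessors follow the same discipline: grab the MAC access reference once, stream all the dwords through the auto-incrementing HBUS port, then release — returning -EBUSY if the NIC cannot be woken. A compact sketch of that bracket, with grab()/release() standing in for iwl_trans_grab_nic_access()/iwl_trans_release_nic_access():

	/* Sketch of the grab/stream/release bracket; names are stand-ins. */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static bool awake;
	static bool grab(void)    { awake = true;  return true; }
	static void release(void) { awake = false; }
	static uint32_t read_dword(void) { return awake ? 0xdeadbeef : 0; }

	static int read_mem(uint32_t *buf, int dwords)
	{
		if (!grab())
			return -1;		/* -EBUSY in the driver */

		for (int i = 0; i < dwords; i++)
			buf[i] = read_dword();	/* auto-incrementing HW port */

		release();
		return 0;
	}

	int main(void)
	{
		uint32_t buf[4];
		printf("read_mem() = %d, buf[0] = 0x%08x\n", read_mem(buf, 4), buf[0]);
		return 0;
	}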
-#endif /* CONFIG_PM_SLEEP */
#define IWL_FLUSH_WAIT_MS 2000
-static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
+static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq;
struct iwl_queue *q;
int cnt;
unsigned long now = jiffies;
+ u32 scd_sram_addr;
+ u8 buf[16];
int ret = 0;
/* waiting for all the tx frames complete might take a while */
for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+ u8 wr_ptr;
+
if (cnt == trans_pcie->cmd_queue)
continue;
+ if (!test_bit(cnt, trans_pcie->queue_used))
+ continue;
+ if (!(BIT(cnt) & txq_bm))
+ continue;
+
+ IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
txq = &trans_pcie->txq[cnt];
q = &txq->q;
- while (q->read_ptr != q->write_ptr && !time_after(jiffies,
- now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
+ wr_ptr = ACCESS_ONCE(q->write_ptr);
+
+ while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
+ !time_after(jiffies,
+ now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
+ u8 write_ptr = ACCESS_ONCE(q->write_ptr);
+
+ if (WARN_ONCE(wr_ptr != write_ptr,
+ "WR pointer moved while flushing %d -> %d\n",
+ wr_ptr, write_ptr))
+ return -ETIMEDOUT;
msleep(1);
+ }
if (q->read_ptr != q->write_ptr) {
- IWL_ERR(trans, "fail to flush all tx fifo queues\n");
+ IWL_ERR(trans,
+ "fail to flush all tx fifo queues Q %d\n", cnt);
ret = -ETIMEDOUT;
break;
}
+ IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
}
- return ret;
-}
-
-static const char *get_fh_string(int cmd)
-{
-#define IWL_CMD(x) case x: return #x
- switch (cmd) {
- IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
- IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
- IWL_CMD(FH_RSCSR_CHNL0_WPTR);
- IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
- IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
- IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
- IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
- IWL_CMD(FH_TSSR_TX_STATUS_REG);
- IWL_CMD(FH_TSSR_TX_ERROR_REG);
- default:
- return "UNKNOWN";
- }
-#undef IWL_CMD
-}
-int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf)
-{
- int i;
- static const u32 fh_tbl[] = {
- FH_RSCSR_CHNL0_STTS_WPTR_REG,
- FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- FH_RSCSR_CHNL0_WPTR,
- FH_MEM_RCSR_CHNL0_CONFIG_REG,
- FH_MEM_RSSR_SHARED_CTRL_REG,
- FH_MEM_RSSR_RX_STATUS_REG,
- FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
- FH_TSSR_TX_STATUS_REG,
- FH_TSSR_TX_ERROR_REG
- };
+ if (!ret)
+ return 0;
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (buf) {
- int pos = 0;
- size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+ IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
+ txq->q.read_ptr, txq->q.write_ptr);
- *buf = kmalloc(bufsz, GFP_KERNEL);
- if (!*buf)
- return -ENOMEM;
+ scd_sram_addr = trans_pcie->scd_base_addr +
+ SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
+ iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
- pos += scnprintf(*buf + pos, bufsz - pos,
- "FH register values:\n");
+ iwl_print_hex_error(trans, buf, sizeof(buf));
- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
- pos += scnprintf(*buf + pos, bufsz - pos,
- " %34s: 0X%08x\n",
- get_fh_string(fh_tbl[i]),
- iwl_read_direct32(trans, fh_tbl[i]));
+ for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
+ IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
+ iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));
- return pos;
+ for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+ u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
+ u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
+ bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
+ u32 tbl_dw =
+ iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
+ SCD_TRANS_TBL_OFFSET_QUEUE(cnt));
+
+ if (cnt & 0x1)
+ tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
+ else
+ tbl_dw = tbl_dw & 0x0000FFFF;
+
+ IWL_ERR(trans,
+ "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
+ cnt, active ? "" : "in", fifo, tbl_dw,
+ iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
+ (TFD_QUEUE_SIZE_MAX - 1),
+ iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
}
-#endif
- IWL_ERR(trans, "FH register values:\n");
- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
- IWL_ERR(trans, " %34s: 0X%08x\n",
- get_fh_string(fh_tbl[i]),
- iwl_read_direct32(trans, fh_tbl[i]));
+ return ret;
+}
- return 0;
+static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
+ u32 mask, u32 value)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned long flags;
+
+ spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+ __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
static const char *get_csr_string(int cmd)
@@ -879,6 +1389,7 @@ static const char *get_csr_string(int cmd)
IWL_CMD(CSR_GIO_CHICKEN_BITS);
IWL_CMD(CSR_ANA_PLL_CFG);
IWL_CMD(CSR_HW_REV_WA_REG);
+ IWL_CMD(CSR_MONITOR_STATUS_REG);
IWL_CMD(CSR_DBG_HPET_MEM_REG);
default:
return "UNKNOWN";
@@ -911,6 +1422,7 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans)
CSR_DRAM_INT_TBL_REG,
CSR_GIO_CHICKEN_BITS,
CSR_ANA_PLL_CFG,
+ CSR_MONITOR_STATUS_REG,
CSR_HW_REV_WA_REG,
CSR_DBG_HPET_MEM_REG
};
@@ -933,18 +1445,7 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans)
} while (0)
/* file operation */
-#define DEBUGFS_READ_FUNC(name) \
-static ssize_t iwl_dbgfs_##name##_read(struct file *file, \
- char __user *user_buf, \
- size_t count, loff_t *ppos);
-
-#define DEBUGFS_WRITE_FUNC(name) \
-static ssize_t iwl_dbgfs_##name##_write(struct file *file, \
- const char __user *user_buf, \
- size_t count, loff_t *ppos);
-
#define DEBUGFS_READ_FILE_OPS(name) \
- DEBUGFS_READ_FUNC(name); \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.read = iwl_dbgfs_##name##_read, \
.open = simple_open, \
@@ -952,7 +1453,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
};
#define DEBUGFS_WRITE_FILE_OPS(name) \
- DEBUGFS_WRITE_FUNC(name); \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.write = iwl_dbgfs_##name##_write, \
.open = simple_open, \
@@ -960,8 +1460,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
};
#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
- DEBUGFS_READ_FUNC(name); \
- DEBUGFS_WRITE_FUNC(name); \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.write = iwl_dbgfs_##name##_write, \
.read = iwl_dbgfs_##name##_read, \
@@ -1142,41 +1640,23 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
{
struct iwl_trans *trans = file->private_data;
char *buf = NULL;
- int pos = 0;
- ssize_t ret = -EFAULT;
-
- ret = pos = iwl_pcie_dump_fh(trans, &buf);
- if (buf) {
- ret = simple_read_from_buffer(user_buf,
- count, ppos, buf, pos);
- kfree(buf);
- }
+ ssize_t ret;
+ ret = iwl_dump_fh(trans, &buf);
+ if (ret < 0)
+ return ret;
+ if (!buf)
+ return -EINVAL;
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+ kfree(buf);
return ret;
}
-static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_trans *trans = file->private_data;
-
- if (!trans->op_mode)
- return -EAGAIN;
-
- local_bh_disable();
- iwl_op_mode_nic_error(trans->op_mode);
- local_bh_enable();
-
- return count;
-}
-
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
-DEBUGFS_WRITE_FILE_OPS(fw_restart);
/*
* Create the debugfs files and directories
@@ -1190,13 +1670,67 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
- DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
return 0;
err:
IWL_ERR(trans, "failed to create the trans debugfs entry\n");
return -ENOMEM;
}
+
+static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
+{
+ u32 cmdlen = 0;
+ int i;
+
+ for (i = 0; i < IWL_NUM_OF_TBS; i++)
+ cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);
+
+ return cmdlen;
+}
+
+static u32 iwl_trans_pcie_dump_data(struct iwl_trans *trans,
+ void *buf, u32 buflen)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_fw_error_dump_data *data;
+ struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_fw_error_dump_txcmd *txcmd;
+ u32 len;
+ int i, ptr;
+
+ if (!buf)
+ return sizeof(*data) +
+ cmdq->q.n_window * (sizeof(*txcmd) +
+ TFD_MAX_PAYLOAD_SIZE);
+
+ len = 0;
+ data = buf;
+ data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
+ txcmd = (void *)data->data;
+ spin_lock_bh(&cmdq->lock);
+ ptr = cmdq->q.write_ptr;
+ for (i = 0; i < cmdq->q.n_window; i++) {
+ u8 idx = get_cmd_index(&cmdq->q, ptr);
+ u32 caplen, cmdlen;
+
+ cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]);
+ caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
+
+ if (cmdlen) {
+ len += sizeof(*txcmd) + caplen;
+ txcmd->cmdlen = cpu_to_le32(cmdlen);
+ txcmd->caplen = cpu_to_le32(caplen);
+ memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
+ txcmd = (void *)((u8 *)txcmd->data + caplen);
+ }
+
+ ptr = iwl_queue_dec_wrap(ptr);
+ }
+ spin_unlock_bh(&cmdq->lock);
+
+ data->len = cpu_to_le32(len);
+ return sizeof(*data) + len;
+}
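
iwl_trans_pcie_dump_data() is a two-pass API: called with buf == NULL it returns the worst-case size so the caller can allocate, and on the second call it fills the buffer. A toy sketch of that calling convention (constants and names are illustrative):

	/* Sketch of the size-query-then-fill dump convention. */
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define N_WINDOW 4			/* illustrative */
	#define PAYLOAD  16

	static uint32_t dump_data(void *buf, uint32_t buflen)
	{
		if (!buf)
			return N_WINDOW * PAYLOAD;	/* pass 1: size query */

		memset(buf, 0xab, buflen < N_WINDOW * PAYLOAD ? buflen
							      : N_WINDOW * PAYLOAD);
		return N_WINDOW * PAYLOAD;		/* pass 2: bytes written */
	}

	int main(void)
	{
		uint32_t len = dump_data(NULL, 0);	/* how much do we need? */
		void *buf = malloc(len);

		if (!buf)
			return 1;
		printf("dump of %u bytes, wrote %u\n", len, dump_data(buf, len));
		free(buf);
		return 0;
	}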
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
struct dentry *dir)
@@ -1207,12 +1741,13 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
static const struct iwl_trans_ops trans_ops_pcie = {
.start_hw = iwl_trans_pcie_start_hw,
- .stop_hw = iwl_trans_pcie_stop_hw,
+ .op_mode_leave = iwl_trans_pcie_op_mode_leave,
.fw_alive = iwl_trans_pcie_fw_alive,
.start_fw = iwl_trans_pcie_start_fw,
.stop_device = iwl_trans_pcie_stop_device,
- .wowlan_suspend = iwl_trans_pcie_wowlan_suspend,
+ .d3_suspend = iwl_trans_pcie_d3_suspend,
+ .d3_resume = iwl_trans_pcie_d3_resume,
.send_cmd = iwl_trans_pcie_send_hcmd,
@@ -1226,17 +1761,22 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
-#ifdef CONFIG_PM_SLEEP
- .suspend = iwl_trans_pcie_suspend,
- .resume = iwl_trans_pcie_resume,
-#endif
.write8 = iwl_trans_pcie_write8,
.write32 = iwl_trans_pcie_write32,
.read32 = iwl_trans_pcie_read32,
.read_prph = iwl_trans_pcie_read_prph,
.write_prph = iwl_trans_pcie_write_prph,
+ .read_mem = iwl_trans_pcie_read_mem,
+ .write_mem = iwl_trans_pcie_write_mem,
.configure = iwl_trans_pcie_configure,
.set_pmi = iwl_trans_pcie_set_pmi,
+ .grab_nic_access = iwl_trans_pcie_grab_nic_access,
+ .release_nic_access = iwl_trans_pcie_release_nic_access,
+ .set_bits_mask = iwl_trans_pcie_set_bits_mask,
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ .dump_data = iwl_trans_pcie_dump_data,
+#endif
};
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
@@ -1250,26 +1790,34 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans = kzalloc(sizeof(struct iwl_trans) +
sizeof(struct iwl_trans_pcie), GFP_KERNEL);
-
- if (!trans)
- return NULL;
+ if (!trans) {
+ err = -ENOMEM;
+ goto out;
+ }
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
trans->ops = &trans_ops_pcie;
trans->cfg = cfg;
+ trans_lockdep_init(trans);
trans_pcie->trans = trans;
spin_lock_init(&trans_pcie->irq_lock);
+ spin_lock_init(&trans_pcie->reg_lock);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
- /* W/A - seems to solve weird behavior. We need to remove this if we
- * don't want to stay in L1 all the time. This wastes a lot of power */
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
- PCIE_LINK_STATE_CLKPM);
-
- if (pci_enable_device(pdev)) {
- err = -ENODEV;
+ err = pci_enable_device(pdev);
+ if (err)
goto out_no_pci;
+
+ if (!cfg->base_params->pcie_l1_allowed) {
+ /*
+ * W/A - seems to solve weird behavior. We need to remove this
+ * if we don't want to stay in L1 all the time. This wastes a
+ * lot of power.
+ */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
+ PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
}
pci_set_master(pdev);
@@ -1306,6 +1854,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
* PCI Tx retries from interfering with C3 CPU state */
pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+ trans->dev = &pdev->dev;
+ trans_pcie->pci_dev = pdev;
+ iwl_disable_interrupts(trans);
+
err = pci_enable_msi(pdev);
if (err) {
dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
@@ -1317,9 +1869,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
}
- trans->dev = &pdev->dev;
- trans_pcie->irq = pdev->irq;
- trans_pcie->pci_dev = pdev;
trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
@@ -1327,7 +1876,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
/* Initialize the wait queue for commands */
init_waitqueue_head(&trans_pcie->wait_command_queue);
- spin_lock_init(&trans->reg_lock);
snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
"iwl_cmd_pool:%s", dev_name(trans->dev));
@@ -1341,11 +1889,30 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
SLAB_HWCACHE_ALIGN,
NULL);
- if (!trans->dev_cmd_pool)
+ if (!trans->dev_cmd_pool) {
+ err = -ENOMEM;
goto out_pci_disable_msi;
+ }
+
+ if (iwl_pcie_alloc_ict(trans))
+ goto out_free_cmd_pool;
+
+ err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
+ iwl_pcie_irq_handler,
+ IRQF_SHARED, DRV_NAME, trans);
+ if (err) {
+ IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
+ goto out_free_ict;
+ }
+
+ trans_pcie->inta_mask = CSR_INI_SET_MASK;
return trans;
+out_free_ict:
+ iwl_pcie_free_ict(trans);
+out_free_cmd_pool:
+ kmem_cache_destroy(trans->dev_cmd_pool);
out_pci_disable_msi:
pci_disable_msi(pdev);
out_pci_release_regions:
@@ -1354,5 +1921,6 @@ out_pci_disable_device:
pci_disable_device(pdev);
out_no_pci:
kfree(trans);
- return NULL;
+out:
+ return ERR_PTR(err);
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 6c5b867c353..038940afbdc 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -65,34 +65,40 @@
***************************************************/
static int iwl_queue_space(const struct iwl_queue *q)
{
- int s = q->read_ptr - q->write_ptr;
-
- if (q->read_ptr > q->write_ptr)
- s -= q->n_bd;
-
- if (s <= 0)
- s += q->n_window;
- /* keep some reserve to not confuse empty and full situations */
- s -= 2;
- if (s < 0)
- s = 0;
- return s;
+ unsigned int max;
+ unsigned int used;
+
+ /*
+ * To avoid ambiguity between empty and completely full queues, there
+ * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
+ * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
+ * to reserve any queue entries for this purpose.
+ */
+ if (q->n_window < TFD_QUEUE_SIZE_MAX)
+ max = q->n_window;
+ else
+ max = TFD_QUEUE_SIZE_MAX - 1;
+
+ /*
+ * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
+ * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
+ */
+ used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);
+
+ if (WARN_ON(used > max))
+ return 0;
+
+ return max - used;
}
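
The new iwl_queue_space() relies on TFD_QUEUE_SIZE_MAX being a power of two: (write - read) & (SIZE - 1) yields the occupancy even across unsigned wrap-around, and capping at n_window (or SIZE - 1) keeps empty and completely full distinguishable. A standalone sketch of the arithmetic (SIZE here is illustrative):

	/* Sketch of the power-of-two ring occupancy math used above. */
	#include <stdio.h>

	#define SIZE 256u			/* must be a power of two */

	static unsigned space(unsigned read, unsigned write, unsigned window)
	{
		unsigned max  = window < SIZE ? window : SIZE - 1;
		unsigned used = (write - read) & (SIZE - 1);	/* wrap-safe */

		return used > max ? 0 : max - used;
	}

	int main(void)
	{
		/* wrapped case: the write pointer already passed the end once;
		 * used = (4 - 250) & 255 = 10, so 245 slots remain */
		printf("space = %u\n", space(250, 4, SIZE));
		return 0;
	}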
/*
* iwl_queue_init - Initialize queue's high/low-water and read/write indexes
*/
-static int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
+static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id)
{
- q->n_bd = count;
q->n_window = slots_num;
q->id = id;
- /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
- * and iwl_queue_dec_wrap are broken. */
- if (WARN_ON(!is_power_of_2(count)))
- return -EINVAL;
-
/* slots_num must be power-of-two size, otherwise
* get_cmd_index is broken. */
if (WARN_ON(!is_power_of_2(slots_num)))
@@ -160,7 +166,7 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
txq->q.read_ptr, txq->q.write_ptr);
- iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
+ iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
iwl_print_hex_error(trans, buf, sizeof(buf));
@@ -173,9 +179,9 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
u32 tbl_dw =
- iwl_read_targ_mem(trans,
- trans_pcie->scd_base_addr +
- SCD_TRANS_TBL_OFFSET_QUEUE(i));
+ iwl_trans_read_mem32(trans,
+ trans_pcie->scd_base_addr +
+ SCD_TRANS_TBL_OFFSET_QUEUE(i));
if (i & 0x1)
tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
@@ -185,20 +191,17 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
IWL_ERR(trans,
"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
i, active ? "" : "in", fifo, tbl_dw,
- iwl_read_prph(trans,
- SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
+ iwl_read_prph(trans, SCD_QUEUE_RDPTR(i)) &
+ (TFD_QUEUE_SIZE_MAX - 1),
iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
}
for (i = q->read_ptr; i != q->write_ptr;
- i = iwl_queue_inc_wrap(i, q->n_bd)) {
- struct iwl_tx_cmd *tx_cmd =
- (struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
+ i = iwl_queue_inc_wrap(i))
IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
- get_unaligned_le32(&tx_cmd->scratch));
- }
+ le32_to_cpu(txq->scratchbufs[i].scratch));
- iwl_op_mode_nic_error(trans->op_mode);
+ iwl_force_nmi(trans);
}
/*
@@ -227,17 +230,20 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
switch (sec_ctl & TX_CMD_SEC_MSK) {
case TX_CMD_SEC_CCM:
- len += CCMP_MIC_LEN;
+ len += IEEE80211_CCMP_MIC_LEN;
break;
case TX_CMD_SEC_TKIP:
- len += TKIP_ICV_LEN;
+ len += IEEE80211_TKIP_ICV_LEN;
break;
case TX_CMD_SEC_WEP:
- len += WEP_IV_LEN + WEP_ICV_LEN;
+ len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
break;
}
- bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
+ if (trans_pcie->bc_table_dword)
+ len = DIV_ROUND_UP(len, 4);
+
+ bc_ent = cpu_to_le16(len | (sta_id << 12));
scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
@@ -275,50 +281,64 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
/*
* iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
*/
-void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
+static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
+ struct iwl_txq *txq)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 reg = 0;
int txq_id = txq->q.id;
- if (txq->need_update == 0)
- return;
-
- if (trans->cfg->base_params->shadow_reg_enable) {
- /* shadow register enabled */
- iwl_write32(trans, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (txq_id << 8));
- } else {
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
- /* if we're trying to save power */
- if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
- /* wake up nic if it's powered down ...
- * uCode will wake up, and interrupt us again, so next
- * time we'll skip this part. */
- reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
-
- if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
- IWL_DEBUG_INFO(trans,
- "Tx queue %d requesting wakeup,"
- " GP1 = 0x%x\n", txq_id, reg);
- iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- return;
- }
-
- iwl_write_direct32(trans, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (txq_id << 8));
+ lockdep_assert_held(&txq->lock);
+ /*
+ * explicitly wake up the NIC if:
+ * 1. shadow registers aren't enabled
+	 * 2. the queue is not the command queue (for commands the NIC is
+	 *    woken up outside this function, regardless of shadow registers)
+ * 3. there is a chance that the NIC is asleep
+ */
+ if (!trans->cfg->base_params->shadow_reg_enable &&
+ txq_id != trans_pcie->cmd_queue &&
+ test_bit(STATUS_TPOWER_PMI, &trans->status)) {
/*
- * else not in power-save mode,
- * uCode will never sleep when we're
- * trying to tx (during RFKILL, we're not trying to tx).
+ * wake up nic if it's powered down ...
+ * uCode will wake up, and interrupt us again, so next
+ * time we'll skip this part.
*/
- } else
- iwl_write32(trans, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (txq_id << 8));
+ reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
+
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
+ txq_id, reg);
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ txq->need_update = true;
+ return;
+ }
+ }
+
+ /*
+ * if not in power-save mode, uCode will never sleep when we're
+ * trying to tx (during RFKILL, we're not trying to tx).
+ */
+ IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
+ iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
+}
+
+void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i;
+
+ for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+ struct iwl_txq *txq = &trans_pcie->txq[i];
+
+ spin_lock_bh(&txq->lock);
+ if (trans_pcie->txq[i].need_update) {
+ iwl_pcie_txq_inc_wr_ptr(trans, txq);
+ trans_pcie->txq[i].need_update = false;
+ }
+ spin_unlock_bh(&txq->lock);
}
- txq->need_update = 0;
}
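
iwl_pcie_txq_check_wrptrs() drains deferred doorbells: a writer that found the NIC asleep just sets need_update, and this sweep later rings the HW write pointer for every marked queue under its own lock. A sketch of that flag-and-flush pattern, with illustrative names:

	/* Sketch of the deferred write-pointer doorbell; names are stand-ins. */
	#include <stdbool.h>
	#include <stdio.h>

	#define NQUEUES 8

	struct txq {
		unsigned write_ptr;
		bool need_update;
	};

	static struct txq txqs[NQUEUES];

	static void ring_doorbell(int i)
	{
		printf("HW WRPTR q%d <- %u\n", i, txqs[i].write_ptr);
	}

	static void check_wrptrs(void)
	{
		for (int i = 0; i < NQUEUES; i++) {
			if (txqs[i].need_update) {	/* under txq->lock in the driver */
				ring_doorbell(i);
				txqs[i].need_update = false;
			}
		}
	}

	int main(void)
	{
		txqs[3].write_ptr = 42;
		txqs[3].need_update = true;	/* e.g. the NIC was asleep */
		check_wrptrs();
		return 0;
	}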
static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
@@ -333,13 +353,6 @@ static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
return addr;
}
-static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
- return le16_to_cpu(tb->hi_n_len) >> 4;
-}
-
static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
dma_addr_t addr, u16 len)
{
@@ -361,8 +374,8 @@ static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
}
static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
- struct iwl_cmd_meta *meta, struct iwl_tfd *tfd,
- enum dma_data_direction dma_dir)
+ struct iwl_cmd_meta *meta,
+ struct iwl_tfd *tfd)
{
int i;
int num_tbs;
@@ -376,17 +389,12 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
return;
}
- /* Unmap tx_cmd */
- if (num_tbs)
- dma_unmap_single(trans->dev,
- dma_unmap_addr(meta, mapping),
- dma_unmap_len(meta, len),
- DMA_BIDIRECTIONAL);
+ /* first TB is never freed - it's the scratchbuf data */
- /* Unmap chunks, if any. */
for (i = 1; i < num_tbs; i++)
dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
- iwl_pcie_tfd_tb_get_len(tfd, i), dma_dir);
+ iwl_pcie_tfd_tb_get_len(tfd, i),
+ DMA_TO_DEVICE);
tfd->num_tbs = 0;
}
@@ -400,20 +408,22 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
* Does NOT advance any TFD circular buffer read/write indexes
* Does NOT free the TFD itself (which is within circular buffer)
*/
-static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
- enum dma_data_direction dma_dir)
+static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
struct iwl_tfd *tfd_tmp = txq->tfds;
- /* rd_ptr is bounded by n_bd and idx is bounded by n_window */
+ /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
+ * idx is bounded by n_window
+ */
int rd_ptr = txq->q.read_ptr;
int idx = get_cmd_index(&txq->q, rd_ptr);
lockdep_assert_held(&txq->lock);
- /* We have only q->n_window txq->entries, but we use q->n_bd tfds */
- iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr],
- dma_dir);
+ /* We have only q->n_window txq->entries, but we use
+ * TFD_QUEUE_SIZE_MAX tfds
+ */
+ iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);
/* free SKB */
if (txq->entries) {
@@ -433,7 +443,7 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
}
static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
- dma_addr_t addr, u16 len, u8 reset)
+ dma_addr_t addr, u16 len, bool reset)
{
struct iwl_queue *q;
struct iwl_tfd *tfd, *tfd_tmp;
@@ -455,13 +465,10 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
return -EINVAL;
}
- if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
+ if (WARN(addr & ~IWL_TX_DMA_MASK,
+ "Unaligned address = %llx\n", (unsigned long long)addr))
return -EINVAL;
- if (unlikely(addr & ~IWL_TX_DMA_MASK))
- IWL_ERR(trans, "Unaligned address = %llx\n",
- (unsigned long long)addr);
-
iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
return 0;
@@ -473,6 +480,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
+ size_t scratchbuf_sz;
int i;
if (WARN_ON(txq->entries || txq->tfds))
@@ -504,13 +512,27 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
* shared with device */
txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
&txq->q.dma_addr, GFP_KERNEL);
- if (!txq->tfds) {
- IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
+ if (!txq->tfds)
goto error;
- }
+
+ BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs));
+ BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) !=
+ sizeof(struct iwl_cmd_header) +
+ offsetof(struct iwl_tx_cmd, scratch));
+
+ scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num;
+
+ txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz,
+ &txq->scratchbufs_dma,
+ GFP_KERNEL);
+ if (!txq->scratchbufs)
+ goto err_free_tfds;
+
txq->q.id = txq_id;
return 0;
+err_free_tfds:
+ dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
error:
if (txq->entries && txq_id == trans_pcie->cmd_queue)
for (i = 0; i < slots_num; i++)
@@ -527,15 +549,14 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
{
int ret;
- txq->need_update = 0;
+ txq->need_update = false;
/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
* iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
/* Initialize queue's high/low-water marks, and head/tail indexes */
- ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
- txq_id);
+ ret = iwl_queue_init(&txq->q, slots_num, txq_id);
if (ret)
return ret;
@@ -559,25 +580,19 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
struct iwl_queue *q = &txq->q;
- enum dma_data_direction dma_dir;
-
- if (!q->n_bd)
- return;
-
- /* In the command queue, all the TBs are mapped as BIDI
- * so unmap them as such.
- */
- if (txq_id == trans_pcie->cmd_queue)
- dma_dir = DMA_BIDIRECTIONAL;
- else
- dma_dir = DMA_TO_DEVICE;
spin_lock_bh(&txq->lock);
while (q->write_ptr != q->read_ptr) {
- iwl_pcie_txq_free_tfd(trans, txq, dma_dir);
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
+ IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
+ txq_id, q->read_ptr);
+ iwl_pcie_txq_free_tfd(trans, txq);
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
}
+ txq->active = false;
spin_unlock_bh(&txq->lock);
+
+ /* just in case - this queue may have been stopped */
+ iwl_wake_queue(trans, txq);
}
/*
@@ -604,15 +619,20 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
if (txq_id == trans_pcie->cmd_queue)
for (i = 0; i < txq->q.n_window; i++) {
kfree(txq->entries[i].cmd);
- kfree(txq->entries[i].copy_cmd);
kfree(txq->entries[i].free_buf);
}
/* De-alloc circular buffer of TFDs */
- if (txq->q.n_bd) {
- dma_free_coherent(dev, sizeof(struct iwl_tfd) *
- txq->q.n_bd, txq->tfds, txq->q.dma_addr);
- memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
+ if (txq->tfds) {
+ dma_free_coherent(dev,
+ sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX,
+ txq->tfds, txq->q.dma_addr);
+ txq->q.dma_addr = 0;
+ txq->tfds = NULL;
+
+ dma_free_coherent(dev,
+ sizeof(*txq->scratchbufs) * txq->q.n_window,
+ txq->scratchbufs, txq->scratchbufs_dma);
}
kfree(txq->entries);
@@ -638,9 +658,11 @@ static void iwl_pcie_txq_set_sched(struct iwl_trans *trans, u32 mask)
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u32 a;
+ int nq = trans->cfg->base_params->num_of_queues;
int chan;
u32 reg_val;
+ int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
+ SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);
/* make sure all queue are not stopped/used */
memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
@@ -652,20 +674,10 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
WARN_ON(scd_base_addr != 0 &&
scd_base_addr != trans_pcie->scd_base_addr);
- a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
- /* reset conext data memory */
- for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
- a += 4)
- iwl_write_targ_mem(trans, a, 0);
- /* reset tx status memory */
- for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
- a += 4)
- iwl_write_targ_mem(trans, a, 0);
- for (; a < trans_pcie->scd_base_addr +
- SCD_TRANS_TBL_OFFSET_QUEUE(
- trans->cfg->base_params->num_of_queues);
- a += 4)
- iwl_write_targ_mem(trans, a, 0);
+ /* reset context data, TX status and translation data */
+ iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
+ SCD_CONTEXT_MEM_LOWER_BOUND,
+ NULL, clear_dwords);
iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
trans_pcie->scd_bc_tbls.dma >> 10);
@@ -673,7 +685,8 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
/* The chain extension of the SCD doesn't work well. This feature is
* enabled by default by the HW, so we need to disable it manually.
*/
- iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
+ if (trans->cfg->base_params->scd_chain_ext_wa)
+ iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
trans_pcie->cmd_fifo);
@@ -693,8 +706,32 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
/* Enable L1-Active */
- iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
- APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+}
+
+void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int txq_id;
+
+ for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+ txq_id++) {
+ struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+
+ iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
+ txq->q.dma_addr >> 8);
+ iwl_pcie_txq_unmap(trans, txq_id);
+ txq->q.read_ptr = 0;
+ txq->q.write_ptr = 0;
+ }
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
+ trans_pcie->kw.dma >> 4);
+
+ iwl_pcie_tx_start(trans, trans_pcie->scd_base_addr);
}
/*
@@ -704,10 +741,9 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ch, txq_id, ret;
- unsigned long flags;
/* Turn off all Tx DMA fifos */
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_pcie_txq_set_sched(trans, 0);
@@ -724,13 +760,19 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)
iwl_read_direct32(trans,
FH_TSSR_TX_STATUS_REG));
}
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
- if (!trans_pcie->txq) {
- IWL_WARN(trans,
- "Stopping tx queues that aren't allocated...\n");
+ /*
+ * This function can be called before the op_mode disabled the
+ * queues. This happens when we have an rfkill interrupt.
+ * Since we stop Tx altogether - mark the queues as stopped.
+ */
+ memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+ memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+ /* This can happen: start_hw, stop_device */
+ if (!trans_pcie->txq)
return 0;
- }
/* Unmap DMA from host system and free skb's */
for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
@@ -803,7 +845,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
sizeof(struct iwl_txq), GFP_KERNEL);
if (!trans_pcie->txq) {
IWL_ERR(trans, "Not enough memory for txq\n");
- ret = ENOMEM;
+ ret = -ENOMEM;
goto error;
}
@@ -832,7 +874,6 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
int txq_id, slots_num;
- unsigned long flags;
bool alloc = false;
if (!trans_pcie->txq) {
@@ -842,7 +883,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
alloc = true;
}
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
/* Turn off all Tx DMA fifos */
iwl_write_prph(trans, SCD_TXFACT, 0);
@@ -851,7 +892,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
trans_pcie->kw.dma >> 4);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
@@ -896,8 +937,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
- /* n_bd is usually 256 => n_bd - 1 = 0xff */
- int tfd_num = ssn & (txq->q.n_bd - 1);
+ int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
struct iwl_queue *q = &txq->q;
int last_to_free;
@@ -905,7 +945,13 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
if (WARN_ON(txq_id == trans_pcie->cmd_queue))
return;
- spin_lock(&txq->lock);
+ spin_lock_bh(&txq->lock);
+
+ if (!txq->active) {
+ IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
+ txq_id, ssn);
+ goto out;
+ }
if (txq->q.read_ptr == tfd_num)
goto out;
@@ -915,12 +961,12 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
/*Since we free until index _not_ inclusive, the one before index is
* the last we will free. This one must be used */
- last_to_free = iwl_queue_dec_wrap(tfd_num, q->n_bd);
+ last_to_free = iwl_queue_dec_wrap(tfd_num);
if (!iwl_queue_used(q, last_to_free)) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
- __func__, txq_id, last_to_free, q->n_bd,
+ __func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
q->write_ptr, q->read_ptr);
goto out;
}
@@ -930,7 +976,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
for (;
q->read_ptr != tfd_num;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
continue;
@@ -941,7 +987,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
- iwl_pcie_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
+ iwl_pcie_txq_free_tfd(trans, txq);
}
iwl_pcie_txq_progress(trans_pcie, txq);
@@ -949,7 +995,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
if (iwl_queue_space(&txq->q) > txq->q.low_mark)
iwl_wake_queue(trans, txq);
out:
- spin_unlock(&txq->lock);
+ spin_unlock_bh(&txq->lock);
}
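/*
 * Worked example, not part of the patch: reclaim frees up to but not
 * including tfd_num. With read_ptr == 10 and ssn == 13, last_to_free is
 * 12, the loop above frees TFDs 10, 11 and 12, and read_ptr ends at 13.
 */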
/*
@@ -964,28 +1010,40 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
struct iwl_queue *q = &txq->q;
+ unsigned long flags;
int nfreed = 0;
lockdep_assert_held(&txq->lock);
- if ((idx >= q->n_bd) || (!iwl_queue_used(q, idx))) {
+ if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
- __func__, txq_id, idx, q->n_bd,
+ __func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
q->write_ptr, q->read_ptr);
return;
}
- for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+ for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx;
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
if (nfreed++ > 0) {
IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
idx, q->write_ptr, q->read_ptr);
- iwl_op_mode_nic_error(trans->op_mode);
+ iwl_force_nmi(trans);
}
}
+ if (trans->cfg->base_params->apmg_wake_up_wa &&
+ q->read_ptr == q->write_ptr) {
+ spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+ WARN_ON(!trans_pcie->cmd_in_flight);
+ trans_pcie->cmd_in_flight = false;
+ __iwl_trans_pcie_clear_bit(trans,
+ CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+ }
+
iwl_pcie_txq_progress(trans_pcie, txq);
}
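/*
 * Illustrative sketch, not part of the patch: on NICs with apmg_wake_up_wa
 * set, cmd_in_flight acts as a one-bit refcount on MAC_ACCESS_REQ - the
 * enqueue path below takes it for the first in-flight host command, and
 * the reclaim above releases it once the command queue drains. Condensed:
 */
static void cmd_in_flight_sketch(struct iwl_trans *trans, bool enqueue)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (enqueue && !trans_pcie->cmd_in_flight) {
		/* first command in flight: keep the NIC awake */
		trans_pcie->cmd_in_flight = true;
		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	} else if (!enqueue && trans_pcie->cmd_in_flight) {
		/* command queue drained: let the NIC sleep again */
		trans_pcie->cmd_in_flight = false;
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	}
}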
@@ -1002,14 +1060,14 @@ static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
tbl_dw_addr = trans_pcie->scd_base_addr +
SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
- tbl_dw = iwl_read_targ_mem(trans, tbl_dw_addr);
+ tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);
if (txq_id & 0x1)
tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
else
tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
- iwl_write_targ_mem(trans, tbl_dw_addr, tbl_dw);
+ iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);
return 0;
}
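/*
 * Worked example, not part of the patch: adjacent queues share one
 * translation-table dword, the odd queue in the upper 16 bits. Writing
 * ra_tid 0x53 for txq 11 into a dword holding 0x00420041 yields
 * 0x00530041, preserving queue 10's mapping in the lower half.
 */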
@@ -1025,6 +1083,10 @@ static inline void iwl_pcie_txq_set_inactive(struct iwl_trans *trans,
(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
+/* Receiver address (actually, Rx station's index into station table),
+ * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
+#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
+
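/*
 * Worked example, not part of the patch: BUILD_RAxTID(5, 3) packs station
 * index 5 and TID 3 as (5 << 4) + 3 == 0x53, matching the scheduler's
 * RA/TID translation-table format written in iwl_pcie_txq_set_ratid_map()
 * above.
 */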
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
int sta_id, int tid, int frame_limit, u16 ssn)
{
@@ -1041,7 +1103,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));
/* If this queue is mapped to a certain station: it is an AGG queue */
- if (sta_id != IWL_INVALID_STATION) {
+ if (sta_id >= 0) {
u16 ra_tid = BUILD_RAxTID(sta_id, tid);
/* Map receiver-address / traffic-ID to this queue */
@@ -1049,6 +1111,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
/* enable aggregations for the queue */
iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
+ trans_pcie->txq[txq_id].ampdu = true;
} else {
/*
* disable aggregations for the queue, this will also make the
@@ -1056,6 +1119,8 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
* non-AGG queue.
*/
iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
+
+ ssn = trans_pcie->txq[txq_id].q.read_ptr;
}
/* Place first TFD at index corresponding to start sequence number.
@@ -1068,9 +1133,9 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
/* Set up Tx window size and frame limit for this queue */
- iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
+ iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
- iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
+ iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
@@ -1083,6 +1148,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
(fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
SCD_QUEUE_STTS_REG_MSK);
+ trans_pcie->txq[txq_id].active = true;
IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
txq_id, fifo, ssn & 0xff);
}
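/*
 * Illustrative sketch, not part of the patch: the second context dword
 * holds the BA window size and the frame limit side by side (the
 * frame-limit half is cut off by the hunk above). Assuming the
 * SCD_QUEUE_CTX_REG2_* definitions from iwl-prph.h, it is built as:
 */
static u32 scd_ctx_reg2_sketch(u32 frame_limit)
{
	return ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
	       ((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
}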
@@ -1094,17 +1160,25 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
SCD_TX_STTS_QUEUE_OFFSET(txq_id);
static const u32 zero_val[4] = {};
+ /*
+ * Upon HW Rfkill - we stop the device, and then stop the queues
+ * in the op_mode. Just for the sake of the simplicity of the op_mode,
+ * allow the op_mode to call txq_disable after it already called
+ * stop_device.
+ */
if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
- WARN_ONCE(1, "queue %d not used", txq_id);
+ WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
+ "queue %d not used", txq_id);
return;
}
iwl_pcie_txq_set_inactive(trans, txq_id);
- _iwl_write_targ_mem_dwords(trans, stts_addr,
- zero_val, ARRAY_SIZE(zero_val));
+ iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
+ ARRAY_SIZE(zero_val));
iwl_pcie_txq_unmap(trans, txq_id);
+ trans_pcie->txq[txq_id].ampdu = false;
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}
@@ -1114,10 +1188,10 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
/*
* iwl_pcie_enqueue_hcmd - enqueue a uCode command
* @priv: device private data point
- * @cmd: a point to the ucode command structure
+ * @cmd: a pointer to the ucode command structure
*
- * The function returns < 0 values to indicate the operation is
- * failed. On success, it turns the index (> 0) of command in the
+ * The function returns < 0 values to indicate the operation
+ * failed. On success, it returns the index (>= 0) of command in the
* command queue.
*/
static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
@@ -1128,23 +1202,41 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_queue *q = &txq->q;
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
+ unsigned long flags;
void *dup_buf = NULL;
dma_addr_t phys_addr;
int idx;
- u16 copy_size, cmd_size;
+ u16 copy_size, cmd_size, scratch_size;
bool had_nocopy = false;
- int i;
+ int i, ret;
u32 cmd_pos;
+ const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
+ u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
copy_size = sizeof(out_cmd->hdr);
cmd_size = sizeof(out_cmd->hdr);
/* need one for the header if the first is NOCOPY */
- BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);
+ BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
- for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+ for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+ cmddata[i] = cmd->data[i];
+ cmdlen[i] = cmd->len[i];
+
if (!cmd->len[i])
continue;
+
+ /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
+ if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
+ int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
+
+ if (copy > cmdlen[i])
+ copy = cmdlen[i];
+ cmdlen[i] -= copy;
+ cmddata[i] += copy;
+ copy_size += copy;
+ }
+
if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
had_nocopy = true;
if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
@@ -1164,7 +1256,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
goto free_dup_buf;
}
- dup_buf = kmemdup(cmd->data[i], cmd->len[i],
+ dup_buf = kmemdup(cmddata[i], cmdlen[i],
GFP_ATOMIC);
if (!dup_buf)
return -ENOMEM;
@@ -1174,7 +1266,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
idx = -EINVAL;
goto free_dup_buf;
}
- copy_size += cmd->len[i];
+ copy_size += cmdlen[i];
}
cmd_size += cmd->len[i];
}
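/*
 * Illustrative sketch, not part of the patch: the loop above carves the
 * leading bytes off each fragment until IWL_HCMD_SCRATCHBUF_SIZE bytes
 * (header included) are guaranteed to be copied, so even an all-NOCOPY
 * command keeps its start in the coherent scratch area. Per fragment:
 */
static void carve_scratch_prefix_sketch(const u8 **data, u16 *len,
					u16 *copy_size)
{
	if (*copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
		int copy = IWL_HCMD_SCRATCHBUF_SIZE - *copy_size;

		if (copy > *len)
			copy = *len;
		/* the mapping pass later sees only the remainder */
		*len -= copy;
		*data += copy;
		*copy_size += copy;
	}
}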
@@ -1221,30 +1313,41 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
/* and copy the data that needs to be copied */
cmd_pos = offsetof(struct iwl_device_cmd, payload);
- for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+ copy_size = sizeof(out_cmd->hdr);
+ for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+ int copy;
+
if (!cmd->len[i])
continue;
- if (cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
- IWL_HCMD_DFL_DUP))
- break;
- memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]);
- cmd_pos += cmd->len[i];
- }
-
- WARN_ON_ONCE(txq->entries[idx].copy_cmd);
-
- /*
- * since out_cmd will be the source address of the FH, it will write
- * the retry count there. So when the user needs to receivce the HCMD
- * that corresponds to the response in the response handler, it needs
- * to set CMD_WANT_HCMD.
- */
- if (cmd->flags & CMD_WANT_HCMD) {
- txq->entries[idx].copy_cmd =
- kmemdup(out_cmd, cmd_pos, GFP_ATOMIC);
- if (unlikely(!txq->entries[idx].copy_cmd)) {
- idx = -ENOMEM;
- goto out;
+ /* copy everything if not nocopy/dup */
+ if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+ IWL_HCMD_DFL_DUP))) {
+ copy = cmd->len[i];
+
+ memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
+ cmd_pos += copy;
+ copy_size += copy;
+ continue;
+ }
+
+ /*
+ * Otherwise we need at least IWL_HCMD_SCRATCHBUF_SIZE copied
+ * in total (for the scratchbuf handling), but copy up to what
+ * we can fit into the payload for debug dump purposes.
+ */
+ copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
+
+ memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
+ cmd_pos += copy;
+
+ /* However, treat copy_size the proper way, we need it below */
+ if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
+ copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
+
+ if (copy > cmd->len[i])
+ copy = cmd->len[i];
+ copy_size += copy;
}
}
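/*
 * Worked example, not part of the patch, assuming the 4-byte command
 * header and 16-byte IWL_HCMD_SCRATCHBUF_SIZE of this era: for a single
 * 200-byte NOCOPY fragment, the first pass claims its leading 12 bytes
 * for the scratch copy (copy_size reaches 16) and leaves cmdlen[i] == 188
 * for the mapping pass, while the loop above still copies as much of the
 * fragment as fits in the payload, purely for debug dumps.
 */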
@@ -1254,22 +1357,35 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
- phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
- DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
- idx = -ENOMEM;
- goto out;
- }
-
- dma_unmap_addr_set(out_meta, mapping, phys_addr);
- dma_unmap_len_set(out_meta, len, copy_size);
+ /* start the TFD with the scratchbuf */
+ scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE);
+ memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
+ iwl_pcie_txq_build_tfd(trans, txq,
+ iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
+ scratch_size, true);
+
+ /* map first command fragment, if any remains */
+ if (copy_size > scratch_size) {
+ phys_addr = dma_map_single(trans->dev,
+ ((u8 *)&out_cmd->hdr) + scratch_size,
+ copy_size - scratch_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(trans->dev, phys_addr)) {
+ iwl_pcie_tfd_unmap(trans, out_meta,
+ &txq->tfds[q->write_ptr]);
+ idx = -ENOMEM;
+ goto out;
+ }
- iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1);
+ iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
+ copy_size - scratch_size, false);
+ }
- for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
- const void *data = cmd->data[i];
+ /* map the remaining (adjusted) nocopy/dup fragments */
+ for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+ const void *data = cmddata[i];
- if (!cmd->len[i])
+ if (!cmdlen[i])
continue;
if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
IWL_HCMD_DFL_DUP)))
@@ -1277,16 +1393,15 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
data = dup_buf;
phys_addr = dma_map_single(trans->dev, (void *)data,
- cmd->len[i], DMA_BIDIRECTIONAL);
+ cmdlen[i], DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
iwl_pcie_tfd_unmap(trans, out_meta,
- &txq->tfds[q->write_ptr],
- DMA_BIDIRECTIONAL);
+ &txq->tfds[q->write_ptr]);
idx = -ENOMEM;
goto out;
}
- iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmd->len[i], 0);
+ iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
}
out_meta->flags = cmd->flags;
@@ -1294,19 +1409,46 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
kfree(txq->entries[idx].free_buf);
txq->entries[idx].free_buf = dup_buf;
- txq->need_update = 1;
-
- trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size,
- &out_cmd->hdr, copy_size);
+ trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
/* start timer if queue currently empty */
if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
+ spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+
+ /*
+ * wake up the NIC to make sure that the firmware will see the host
+ * command - we will let the NIC sleep once all the host commands
+ * returned. This needs to be done only on NICs that have
+ * apmg_wake_up_wa set.
+ */
+ if (trans->cfg->base_params->apmg_wake_up_wa &&
+ !trans_pcie->cmd_in_flight) {
+ trans_pcie->cmd_in_flight = true;
+ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+ CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
+ 15000);
+ if (ret < 0) {
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+ trans_pcie->cmd_in_flight = false;
+ idx = -EIO;
+ goto out;
+ }
+ }
+
/* Increment and update queue's write index */
- q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
+ q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
iwl_pcie_txq_inc_wr_ptr(trans, txq);
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+
out:
spin_unlock_bh(&txq->lock);
free_dup_buf:
@@ -1350,13 +1492,13 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
return;
}
- spin_lock(&txq->lock);
+ spin_lock_bh(&txq->lock);
cmd_index = get_cmd_index(&txq->q, index);
cmd = txq->entries[cmd_index].cmd;
meta = &txq->entries[cmd_index].meta;
- iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
+ iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);
/* Input error checking is done when commands are added to queue. */
if (meta->flags & CMD_WANT_SKB) {
@@ -1371,12 +1513,12 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
iwl_pcie_cmdq_reclaim(trans, txq_id, index);
if (!(meta->flags & CMD_ASYNC)) {
- if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
+ if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
IWL_WARN(trans,
"HCMD_ACTIVE already clear for command %s\n",
get_cmd_string(trans_pcie, cmd->hdr.cmd));
}
- clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
get_cmd_string(trans_pcie, cmd->hdr.cmd));
wake_up(&trans_pcie->wait_command_queue);
@@ -1384,10 +1526,10 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
meta->flags = 0;
- spin_unlock(&txq->lock);
+ spin_unlock_bh(&txq->lock);
}
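/*
 * Illustrative sketch, not part of the patch: the completion path above
 * and iwl_pcie_send_hcmd_sync() below form a flag-plus-waitqueue
 * handshake on the new trans-wide status word. The waiting side,
 * stripped to its core:
 */
static int sync_hcmd_wait_sketch(struct iwl_trans *trans,
				 struct iwl_trans_pcie *trans_pcie)
{
	/*
	 * STATUS_SYNC_HCMD_ACTIVE was set before enqueueing; sleep until
	 * iwl_pcie_hcmd_complete() clears it and wakes wait_command_queue.
	 */
	if (!wait_event_timeout(trans_pcie->wait_command_queue,
				!test_bit(STATUS_SYNC_HCMD_ACTIVE,
					  &trans->status),
				HOST_COMPLETE_TIMEOUT))
		return -ETIMEDOUT;

	return 0;
}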
-#define HOST_COMPLETE_TIMEOUT (2 * HZ)
+#define HOST_COMPLETE_TIMEOUT (2 * HZ)
static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
@@ -1419,12 +1561,11 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
get_cmd_string(trans_pcie, cmd->id));
- if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
- &trans_pcie->status))) {
- IWL_ERR(trans, "Command %s: a command is already active!\n",
- get_cmd_string(trans_pcie, cmd->id));
+ if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status),
+ "Command %s: a command is already active!\n",
+ get_cmd_string(trans_pcie, cmd->id)))
return -EIO;
- }
IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
get_cmd_string(trans_pcie, cmd->id));
@@ -1432,7 +1573,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
if (cmd_idx < 0) {
ret = cmd_idx;
- clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
IWL_ERR(trans,
"Error sending %s: enqueue_hcmd failed: %d\n",
get_cmd_string(trans_pcie, cmd->id), ret);
@@ -1440,41 +1581,41 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
}
ret = wait_event_timeout(trans_pcie->wait_command_queue,
- !test_bit(STATUS_HCMD_ACTIVE,
- &trans_pcie->status),
+ !test_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status),
HOST_COMPLETE_TIMEOUT);
if (!ret) {
- if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
- struct iwl_txq *txq =
- &trans_pcie->txq[trans_pcie->cmd_queue];
- struct iwl_queue *q = &txq->q;
+ struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_queue *q = &txq->q;
- IWL_ERR(trans,
- "Error sending %s: time out after %dms.\n",
- get_cmd_string(trans_pcie, cmd->id),
- jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+ IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
+ get_cmd_string(trans_pcie, cmd->id),
+ jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
- IWL_ERR(trans,
- "Current CMD queue read_ptr %d write_ptr %d\n",
- q->read_ptr, q->write_ptr);
-
- clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
- IWL_DEBUG_INFO(trans,
- "Clearing HCMD_ACTIVE for command %s\n",
- get_cmd_string(trans_pcie, cmd->id));
- ret = -ETIMEDOUT;
- goto cancel;
- }
+ IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
+ q->read_ptr, q->write_ptr);
+
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
+ get_cmd_string(trans_pcie, cmd->id));
+ ret = -ETIMEDOUT;
+
+ iwl_force_nmi(trans);
+ iwl_trans_fw_error(trans);
+
+ goto cancel;
}
- if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) {
+ if (test_bit(STATUS_FW_ERROR, &trans->status)) {
IWL_ERR(trans, "FW error in SYNC CMD %s\n",
get_cmd_string(trans_pcie, cmd->id));
+ dump_stack();
ret = -EIO;
goto cancel;
}
- if (test_bit(STATUS_RFKILL, &trans_pcie->status)) {
+ if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+ test_bit(STATUS_RFKILL, &trans->status)) {
IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
ret = -ERFKILL;
goto cancel;
@@ -1511,13 +1652,12 @@ cancel:
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (test_bit(STATUS_FW_ERROR, &trans_pcie->status))
- return -EIO;
-
- if (test_bit(STATUS_RFKILL, &trans_pcie->status))
+ if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+ test_bit(STATUS_RFKILL, &trans->status)) {
+ IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
+ cmd->id);
return -ERFKILL;
+ }
if (cmd->flags & CMD_ASYNC)
return iwl_pcie_send_hcmd_async(trans, cmd);
@@ -1535,22 +1675,20 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_cmd_meta *out_meta;
struct iwl_txq *txq;
struct iwl_queue *q;
- dma_addr_t phys_addr = 0;
- dma_addr_t txcmd_phys;
- dma_addr_t scratch_phys;
- u16 len, firstlen, secondlen;
- u8 wait_write_ptr = 0;
+ dma_addr_t tb0_phys, tb1_phys, scratch_phys;
+ void *tb1_addr;
+ u16 len, tb1_len, tb2_len;
+ bool wait_write_ptr;
__le16 fc = hdr->frame_control;
u8 hdr_len = ieee80211_hdrlen(fc);
- u16 __maybe_unused wifi_seq;
+ u16 wifi_seq;
txq = &trans_pcie->txq[txq_id];
q = &txq->q;
- if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
- WARN_ON_ONCE(1);
+ if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
+ "TX on unused queue %d\n", txq_id))
return -EINVAL;
- }
spin_lock(&txq->lock);
@@ -1559,106 +1697,89 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
* the BA.
* Check here that the packets are in the right place on the ring.
*/
-#ifdef CONFIG_IWLWIFI_DEBUG
- wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
- WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
- ((wifi_seq & 0xff) != q->write_ptr),
+ wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+ WARN_ONCE(txq->ampdu &&
+ (wifi_seq & 0xff) != q->write_ptr,
"Q: %d WiFi Seq %d tfdNum %d",
txq_id, wifi_seq, q->write_ptr);
-#endif
/* Set up driver data for this TFD */
txq->entries[q->write_ptr].skb = skb;
txq->entries[q->write_ptr].cmd = dev_cmd;
- dev_cmd->hdr.cmd = REPLY_TX;
dev_cmd->hdr.sequence =
cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
INDEX_TO_SEQ(q->write_ptr)));
+ tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr);
+ scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
+ offsetof(struct iwl_tx_cmd, scratch);
+
+ tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+ tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
+
/* Set up first empty entry in queue's array of Tx/cmd buffers */
out_meta = &txq->entries[q->write_ptr].meta;
/*
- * Use the first empty entry in this queue's command buffer array
- * to contain the Tx command and MAC header concatenated together
- * (payload data will be in another buffer).
- * Size of this varies, due to varying MAC header length.
- * If end is not dword aligned, we'll have 2 extra bytes at the end
- * of the MAC header (device reads on dword boundaries).
- * We'll tell device about this padding later.
+ * The second TB (tb1) points to the remainder of the TX command
+ * and the 802.11 header - dword aligned size
+ * (This calculation modifies the TX command, so do it before the
+ * setup of the first TB)
*/
- len = sizeof(struct iwl_tx_cmd) +
- sizeof(struct iwl_cmd_header) + hdr_len;
- firstlen = (len + 3) & ~3;
+ len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
+ hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
+ tb1_len = ALIGN(len, 4);
/* Tell NIC about any 2-byte padding after MAC header */
- if (firstlen != len)
+ if (tb1_len != len)
tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
- /* Physical address of this Tx command's header (not MAC header!),
- * within command buffer array. */
- txcmd_phys = dma_map_single(trans->dev,
- &dev_cmd->hdr, firstlen,
- DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
- goto out_err;
- dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
- dma_unmap_len_set(out_meta, len, firstlen);
+ /* The first TB points to the scratchbuf data - min_copy bytes */
+ memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
+ IWL_HCMD_SCRATCHBUF_SIZE);
+ iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
+ IWL_HCMD_SCRATCHBUF_SIZE, true);
- if (!ieee80211_has_morefrags(fc)) {
- txq->need_update = 1;
- } else {
- wait_write_ptr = 1;
- txq->need_update = 0;
- }
+ /* there must be data left over for TB1 or this code must be changed */
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE);
- /* Set up TFD's 2nd entry to point directly to remainder of skb,
- * if any (802.11 null frames have no payload). */
- secondlen = skb->len - hdr_len;
- if (secondlen > 0) {
- phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
- secondlen, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
- dma_unmap_single(trans->dev,
- dma_unmap_addr(out_meta, mapping),
- dma_unmap_len(out_meta, len),
- DMA_BIDIRECTIONAL);
+ /* map the data for TB1 */
+ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE;
+ tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
+ goto out_err;
+ iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
+
+ /*
+ * Set up TFD's third entry to point directly to remainder
+ * of skb, if any (802.11 null frames have no payload).
+ */
+ tb2_len = skb->len - hdr_len;
+ if (tb2_len > 0) {
+ dma_addr_t tb2_phys = dma_map_single(trans->dev,
+ skb->data + hdr_len,
+ tb2_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
+ iwl_pcie_tfd_unmap(trans, out_meta,
+ &txq->tfds[q->write_ptr]);
goto out_err;
}
+ iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
}
- /* Attach buffers to TFD */
- iwl_pcie_txq_build_tfd(trans, txq, txcmd_phys, firstlen, 1);
- if (secondlen > 0)
- iwl_pcie_txq_build_tfd(trans, txq, phys_addr, secondlen, 0);
-
- scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
- offsetof(struct iwl_tx_cmd, scratch);
-
- /* take back ownership of DMA buffer to enable update */
- dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
- DMA_BIDIRECTIONAL);
- tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
- tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
-
- IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
- le16_to_cpu(dev_cmd->hdr.sequence));
- IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
-
/* Set up entry for this TFD in Tx byte-count array */
iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
- dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
- DMA_BIDIRECTIONAL);
-
trace_iwlwifi_dev_tx(trans->dev, skb,
&txq->tfds[txq->q.write_ptr],
sizeof(struct iwl_tfd),
- &dev_cmd->hdr, firstlen,
- skb->data + hdr_len, secondlen);
+ &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
+ skb->data + hdr_len, tb2_len);
trace_iwlwifi_dev_tx_data(trans->dev, skb,
- skb->data + hdr_len, secondlen);
+ skb->data + hdr_len, tb2_len);
+
+ wait_write_ptr = ieee80211_has_morefrags(fc);
/* start timer if queue currently empty */
if (txq->need_update && q->read_ptr == q->write_ptr &&
@@ -1666,22 +1787,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
/* Tell device the write index *just past* this latest filled TFD */
- q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_pcie_txq_inc_wr_ptr(trans, txq);
+ q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
+ if (!wait_write_ptr)
+ iwl_pcie_txq_inc_wr_ptr(trans, txq);
/*
* At this point the frame is "transmitted" successfully
- * and we will get a TX status notification eventually,
- * regardless of the value of ret. "ret" only indicates
- * whether or not we should update the write pointer.
+ * and we will get a TX status notification eventually.
*/
if (iwl_queue_space(q) < q->high_mark) {
- if (wait_write_ptr) {
- txq->need_update = 1;
+ if (wait_write_ptr)
iwl_pcie_txq_inc_wr_ptr(trans, txq);
- } else {
+ else
iwl_stop_queue(trans, txq);
- }
}
spin_unlock(&txq->lock);
return 0;
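/*
 * Illustrative summary, not part of the patch: after this rework a data
 * frame is described by up to three TBs instead of two bidirectional
 * mappings:
 *
 *   TB0 - first IWL_HCMD_SCRATCHBUF_SIZE bytes of the TX command, copied
 *         into the per-queue coherent scratchbuf (no streaming mapping);
 *   TB1 - remainder of the TX command plus the 802.11 header, dword
 *         aligned and mapped DMA_TO_DEVICE;
 *   TB2 - the frame payload (skb->len - hdr_len), absent for null frames.
 *
 * The TB1 length computed above reduces to:
 */
static u16 tb1_len_sketch(u8 hdr_len)
{
	u16 len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
		  hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;

	/* any resulting 2-byte pad is flagged via TX_CMD_FLG_MH_PAD_MSK */
	return ALIGN(len, 4);
}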