Diffstat (limited to 'drivers/net/wireless/iwlwifi/pcie')
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c      | 207
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/internal.h | 103
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/rx.c       | 581
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c    | 823
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c       | 449
5 files changed, 1328 insertions(+), 835 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 81f3ea5b09a..98950e45c7b 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -66,6 +66,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
+#include <linux/acpi.h>
#include "iwl-trans.h"
#include "iwl-drv.h"
@@ -130,6 +131,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */
{IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */
{IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_abg_cfg)}, /* Half Mini Card */
{IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */
{IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */
@@ -138,13 +140,16 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
/* 6x00 Series */
{IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_3agn_cfg)},
{IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_3agn_cfg)},
{IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
{IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
{IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
{IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
{IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
{IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_3agn_cfg)},
{IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
{IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
@@ -152,12 +157,16 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_2agn_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_2agn_cfg)},
{IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_2agn_cfg)},
{IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
{IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
{IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_2agn_sff_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */
{IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */
@@ -239,8 +248,11 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
/* 6x35 Series */
{IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x406A, iwl6035_2agn_sff_cfg)},
{IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088F, 0x426A, iwl6035_2agn_sff_cfg)},
{IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x446A, iwl6035_2agn_sff_cfg)},
{IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)},
{IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)},
@@ -257,62 +269,216 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
#endif /* CONFIG_IWLDVM */
#if IS_ENABLED(CONFIG_IWLMVM)
-/* 7000 Series */
+/* 7260 Series */
{IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)},
{IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4272, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x426A, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4472, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x446A, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4570, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4560, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x5072, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x5170, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x5770, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC072, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC06A, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)},
{IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC472, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC570, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC560, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC370, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC360, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC02A, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)},
/* 3160 Series */
{IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0072, iwl3160_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0172, iwl3160_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)},
{IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x0272, iwl3160_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0472, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x0370, iwl3160_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8072, iwl3160_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8172, iwl3160_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)},
{IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)},
+
+/* 7265 Series */
+ {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5510, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5102, iwl7265_n_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
+
+/* 8000 Series */
+ {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
};
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
+#ifdef CONFIG_ACPI
+#define SPL_METHOD "SPLC"
+#define SPL_DOMAINTYPE_MODULE BIT(0)
+#define SPL_DOMAINTYPE_WIFI BIT(1)
+#define SPL_DOMAINTYPE_WIGIG BIT(2)
+#define SPL_DOMAINTYPE_RFEM BIT(3)
+
+static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
+{
+ union acpi_object *limits, *domain_type, *power_limit;
+
+ if (splx->type != ACPI_TYPE_PACKAGE ||
+ splx->package.count != 2 ||
+ splx->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ splx->package.elements[0].integer.value != 0) {
+ IWL_ERR(trans, "Unsupported splx structure\n");
+ return 0;
+ }
+
+ limits = &splx->package.elements[1];
+ if (limits->type != ACPI_TYPE_PACKAGE ||
+ limits->package.count < 2 ||
+ limits->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ limits->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ IWL_ERR(trans, "Invalid limits element\n");
+ return 0;
+ }
+
+ domain_type = &limits->package.elements[0];
+ power_limit = &limits->package.elements[1];
+ if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) {
+ IWL_DEBUG_INFO(trans, "WiFi power is not limited\n");
+ return 0;
+ }
+
+ return power_limit->integer.value;
+}
+
+static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
+{
+ acpi_handle pxsx_handle;
+ acpi_handle handle;
+ struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_status status;
+
+ pxsx_handle = ACPI_HANDLE(&pdev->dev);
+ if (!pxsx_handle) {
+ IWL_DEBUG_INFO(trans,
+ "Could not retrieve root port ACPI handle\n");
+ return;
+ }
+
+ /* Get the method's handle */
+ status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_INFO(trans, "SPL method not found\n");
+ return;
+ }
+
+ /* Call SPLC with no arguments */
+ status = acpi_evaluate_object(handle, NULL, NULL, &splx);
+ if (ACPI_FAILURE(status)) {
+ IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status);
+ return;
+ }
+
+ trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer);
+ IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n",
+ trans->dflt_pwr_limit);
+ kfree(splx.pointer);
+}
+
+#else /* CONFIG_ACPI */
+static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) {}
+#endif
+
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
@@ -324,19 +490,21 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int ret;
iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg);
- if (iwl_trans == NULL)
- return -ENOMEM;
+ if (IS_ERR(iwl_trans))
+ return PTR_ERR(iwl_trans);
pci_set_drvdata(pdev, iwl_trans);
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
trans_pcie->drv = iwl_drv_start(iwl_trans, cfg);
- if (IS_ERR_OR_NULL(trans_pcie->drv)) {
+ if (IS_ERR(trans_pcie->drv)) {
ret = PTR_ERR(trans_pcie->drv);
goto out_free_trans;
}
+ set_dflt_pwr_limit(iwl_trans, pdev);
+
/* register transport layer debugfs here */
ret = iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir);
if (ret)
@@ -348,7 +516,6 @@ out_free_drv:
iwl_drv_stop(trans_pcie->drv);
out_free_trans:
iwl_trans_pcie_free(iwl_trans);
- pci_set_drvdata(pdev, NULL);
return ret;
}
@@ -359,29 +526,25 @@ static void iwl_pci_remove(struct pci_dev *pdev)
iwl_drv_stop(trans_pcie->drv);
iwl_trans_pcie_free(trans);
-
- pci_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM_SLEEP
static int iwl_pci_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_trans *iwl_trans = pci_get_drvdata(pdev);
-
/* Before you put code here, think about WoWLAN. You cannot check here
* whether WoWLAN is enabled or not, and your code will run even if
* WoWLAN is enabled - don't kill the NIC, someone may need it in Sx.
*/
- return iwl_trans_suspend(iwl_trans);
+ return 0;
}
static int iwl_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_trans *iwl_trans = pci_get_drvdata(pdev);
+ struct iwl_trans *trans = pci_get_drvdata(pdev);
+ bool hw_rfkill;
/* Before you put code here, think about WoWLAN. You cannot check here
* whether WoWLAN is enabled or not, and your code will run even if
@@ -394,7 +557,15 @@ static int iwl_pci_resume(struct device *device)
*/
pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
- return iwl_trans_resume(iwl_trans);
+ if (!trans->op_mode)
+ return 0;
+
+ iwl_enable_rfkill_int(trans);
+
+ hw_rfkill = iwl_is_rfkill_set(trans);
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+
+ return 0;
}
static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
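
The probe hunk above changes iwl_trans_pcie_alloc() from returning NULL on failure to returning an ERR_PTR, so iwl_pci_probe() can propagate the real errno instead of collapsing every failure to -ENOMEM. A minimal sketch of the ERR_PTR convention, with hypothetical example_* names standing in for the driver's real types:

    #include <linux/err.h>
    #include <linux/slab.h>

    struct example_ctx { int dummy; };

    /* On failure, encode the errno in the returned pointer itself */
    static struct example_ctx *example_alloc(void)
    {
        struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

        if (!ctx)
            return ERR_PTR(-ENOMEM);
        return ctx;
    }

    static int example_probe(void)
    {
        struct example_ctx *ctx = example_alloc();

        /* IS_ERR() distinguishes an encoded errno from a valid pointer */
        if (IS_ERR(ctx))
            return PTR_ERR(ctx); /* propagates the encoded errno */

        kfree(ctx);
        return 0;
    }

The tightening of IS_ERR_OR_NULL() to IS_ERR() on trans_pcie->drv follows the same convention, presumably because iwl_drv_start() likewise reports failure only via ERR_PTR and never returns NULL.
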
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index b654dcdd048..6c22b23a284 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -102,7 +102,7 @@ struct iwl_rxq {
u32 write_actual;
struct list_head rx_free;
struct list_head rx_used;
- int need_update;
+ bool need_update;
struct iwl_rb_status *rb_stts;
dma_addr_t rb_stts_dma;
spinlock_t lock;
@@ -117,21 +117,19 @@ struct iwl_dma_ptr {
/**
* iwl_queue_inc_wrap - increment queue index, wrap back to beginning
* @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
*/
-static inline int iwl_queue_inc_wrap(int index, int n_bd)
+static inline int iwl_queue_inc_wrap(int index)
{
- return ++index & (n_bd - 1);
+ return ++index & (TFD_QUEUE_SIZE_MAX - 1);
}
/**
* iwl_queue_dec_wrap - decrement queue index, wrap back to end
* @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
*/
-static inline int iwl_queue_dec_wrap(int index, int n_bd)
+static inline int iwl_queue_dec_wrap(int index)
{
- return --index & (n_bd - 1);
+ return --index & (TFD_QUEUE_SIZE_MAX - 1);
}
struct iwl_cmd_meta {
@@ -145,13 +143,13 @@ struct iwl_cmd_meta {
*
* Contains common data for Rx and Tx queues.
*
- * Note the difference between n_bd and n_window: the hardware
- * always assumes 256 descriptors, so n_bd is always 256 (unless
+ * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
+ * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
* there might be HW changes in the future). For the normal TX
* queues, n_window, which is the size of the software queue data
* is also 256; however, for the command queue, n_window is only
* 32 since we don't need so many commands pending. Since the HW
- * still uses 256 BDs for DMA though, n_bd stays 256. As a result,
+ * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. As a result,
* the software buffers (in the variables @meta, @txb in struct
* iwl_txq) only have 32 entries, while the HW buffers (@tfds in
* the same struct) have 256.
@@ -162,7 +160,6 @@ struct iwl_cmd_meta {
* data is a window overlayed over the HW queue.
*/
struct iwl_queue {
- int n_bd; /* number of BDs in this queue */
int write_ptr; /* 1-st empty entry (index) host_w*/
int read_ptr; /* last used entry (index) host_r*/
/* use for monitoring and recovering the stuck queue */
@@ -231,7 +228,7 @@ struct iwl_txq {
spinlock_t lock;
struct timer_list stuck_timer;
struct iwl_trans_pcie *trans_pcie;
- u8 need_update;
+ bool need_update;
u8 active;
bool ampdu;
};
@@ -256,13 +253,13 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
* @hw_base: pci hardware address support
* @ucode_write_complete: indicates that the ucode has been copied.
* @ucode_write_waitq: wait queue for uCode load
- * @status - transport specific status flags
* @cmd_queue - command queue number
* @rx_buf_size_8k: 8 kB RX buffer size
* @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
* @rx_page_order: page order for receive buffer size
* @wd_timeout: queue watchdog timeout (jiffies)
* @reg_lock: protect hw register access
+ * @cmd_in_flight: true when we have a host command in flight
*/
struct iwl_trans_pcie {
struct iwl_rxq rxq;
@@ -270,11 +267,13 @@ struct iwl_trans_pcie {
struct iwl_trans *trans;
struct iwl_drv *drv;
+ struct net_device napi_dev;
+ struct napi_struct napi;
+
/* INT ICT Table */
__le32 *ict_tbl;
dma_addr_t ict_tbl_dma;
int ict_index;
- u32 inta;
bool use_ict;
struct isr_statistics isr_stats;
@@ -296,7 +295,6 @@ struct iwl_trans_pcie {
wait_queue_head_t ucode_write_waitq;
wait_queue_head_t wait_command_queue;
- unsigned long status;
u8 cmd_queue;
u8 cmd_fifo;
u8 n_no_reclaim_cmds;
@@ -306,31 +304,14 @@ struct iwl_trans_pcie {
bool bc_table_dword;
u32 rx_page_order;
- const char **command_names;
+ const char *const *command_names;
/* queue watchdog */
unsigned long wd_timeout;
/*protect hw register */
spinlock_t reg_lock;
-};
-
-/**
- * enum iwl_pcie_status: status of the PCIe transport
- * @STATUS_HCMD_ACTIVE: a SYNC command is being processed
- * @STATUS_DEVICE_ENABLED: APM is enabled
- * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
- * @STATUS_INT_ENABLED: interrupts are enabled
- * @STATUS_RFKILL: the HW RFkill switch is in KILL position
- * @STATUS_FW_ERROR: the fw is in error state
- */
-enum iwl_pcie_status {
- STATUS_HCMD_ACTIVE,
- STATUS_DEVICE_ENABLED,
- STATUS_TPOWER_PMI,
- STATUS_INT_ENABLED,
- STATUS_RFKILL,
- STATUS_FW_ERROR,
+ bool cmd_in_flight;
};
#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
@@ -363,7 +344,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans);
/*****************************************************
* ICT - interrupt handling
******************************************************/
-irqreturn_t iwl_pcie_isr_ict(int irq, void *data);
+irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
@@ -381,7 +362,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int txq_id);
-void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
+void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
struct iwl_rx_cmd_buffer *rxb, int handler_status);
@@ -389,10 +370,16 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
+static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+{
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+ return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
/*****************************************************
* Error handling
******************************************************/
-int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf);
void iwl_pcie_dump_csr(struct iwl_trans *trans);
/*****************************************************
@@ -400,8 +387,7 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans);
******************************************************/
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
+ clear_bit(STATUS_INT_ENABLED, &trans->status);
/* disable interrupts from uCode/NIC to host */
iwl_write32(trans, CSR_INT_MASK, 0x00000000);
@@ -418,14 +404,18 @@ static inline void iwl_enable_interrupts(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
- set_bit(STATUS_INT_ENABLED, &trans_pcie->status);
+ set_bit(STATUS_INT_ENABLED, &trans->status);
+ trans_pcie->inta_mask = CSR_INI_SET_MASK;
iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
- iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
+ trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
+ iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}
static inline void iwl_wake_queue(struct iwl_trans *trans,
@@ -478,4 +468,33 @@ static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}
+static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
+ u32 reg, u32 mask, u32 value)
+{
+ u32 v;
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ WARN_ON_ONCE(value & ~mask);
+#endif
+
+ v = iwl_read32(trans, reg);
+ v &= ~mask;
+ v |= value;
+ iwl_write32(trans, reg, v);
+}
+
+static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
+ u32 reg, u32 mask)
+{
+ __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
+}
+
+static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
+ u32 reg, u32 mask)
+{
+ __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
+}
+
+void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
+
#endif /* __iwl_trans_int_pcie_h__ */
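
The wrap helpers above drop the n_bd argument because the hardware queue depth is now the compile-time constant TFD_QUEUE_SIZE_MAX (256, per the updated iwl_queue comment). The "& (size - 1)" mask is an exact modulo only when the size is a power of 2, and unlike the C "%" operator it also wraps correctly when the decrement goes below zero. A standalone userspace sketch of the same arithmetic (not driver code):

    #include <assert.h>

    #define QUEUE_SIZE 256 /* power of 2, like TFD_QUEUE_SIZE_MAX */

    static int inc_wrap(int index) { return ++index & (QUEUE_SIZE - 1); }
    static int dec_wrap(int index) { return --index & (QUEUE_SIZE - 1); }

    int main(void)
    {
        assert(inc_wrap(QUEUE_SIZE - 1) == 0); /* wraps forward to the start */
        assert(dec_wrap(0) == QUEUE_SIZE - 1); /* -1 & 255 wraps back to the end */
        return 0;
    }
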
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index fd848cd1583..a2698e5e062 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -112,15 +112,16 @@
*/
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
- int s = rxq->read - rxq->write;
-
- if (s <= 0)
- s += RX_QUEUE_SIZE;
- /* keep some buffer to not confuse full and empty queue */
- s -= 2;
- if (s < 0)
- s = 0;
- return s;
+ /* Make sure RX_QUEUE_SIZE is a power of 2 */
+ BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1));
+
+ /*
+ * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
+ * between empty and completely full queues.
+ * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
+ * defined for negative dividends.
+ */
+ return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1);
}
/*
@@ -144,55 +145,52 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans)
/*
* iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
*/
-static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
- struct iwl_rxq *rxq)
+static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans)
{
- unsigned long flags;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
u32 reg;
- spin_lock_irqsave(&rxq->lock, flags);
+ lockdep_assert_held(&rxq->lock);
- if (rxq->need_update == 0)
- goto exit_unlock;
+ /*
+ * explicitly wake up the NIC if:
+ * 1. shadow registers aren't enabled
+ * 2. there is a chance that the NIC is asleep
+ */
+ if (!trans->cfg->base_params->shadow_reg_enable &&
+ test_bit(STATUS_TPOWER_PMI, &trans->status)) {
+ reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
+
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
+ reg);
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ rxq->need_update = true;
+ return;
+ }
+ }
- if (trans->cfg->base_params->shadow_reg_enable) {
- /* shadow register enabled */
- /* Device expects a multiple of 8 */
- rxq->write_actual = (rxq->write & ~0x7);
- iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
- } else {
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
-
- /* If power-saving is in use, make sure device is awake */
- if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
- reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
-
- if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
- IWL_DEBUG_INFO(trans,
- "Rx queue requesting wakeup,"
- " GP1 = 0x%x\n", reg);
- iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- goto exit_unlock;
- }
+ rxq->write_actual = round_down(rxq->write, 8);
+ iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
+}
- rxq->write_actual = (rxq->write & ~0x7);
- iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
- rxq->write_actual);
+static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
- /* Else device is assumed to be awake */
- } else {
- /* Device expects a multiple of 8 */
- rxq->write_actual = (rxq->write & ~0x7);
- iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
- rxq->write_actual);
- }
- }
- rxq->need_update = 0;
+ spin_lock(&rxq->lock);
+
+ if (!rxq->need_update)
+ goto exit_unlock;
+
+ iwl_pcie_rxq_inc_wr_ptr(trans);
+ rxq->need_update = false;
exit_unlock:
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
}
/*
@@ -211,7 +209,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rx_mem_buffer *rxb;
- unsigned long flags;
/*
* If the device isn't enabled - not need to try to add buffers...
@@ -221,10 +218,10 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
* stopped, we cannot access the HW (in particular not prph).
* So don't try to restock if the APM has been already stopped.
*/
- if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
+ if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
return;
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
/* The overwritten rxb must be a used one */
rxb = rxq->queue[rxq->write];
@@ -241,7 +238,7 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
rxq->free_count--;
}
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
/* If the pre-allocated buffer pool is dropping low, schedule to
* refill it */
if (rxq->free_count <= RX_LOW_WATERMARK)
@@ -250,10 +247,9 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
/* If we've added more space for the firmware to place data, tell it.
* Increment device's write pointer in multiples of 8. */
if (rxq->write_actual != (rxq->write & ~0x7)) {
- spin_lock_irqsave(&rxq->lock, flags);
- rxq->need_update = 1;
- spin_unlock_irqrestore(&rxq->lock, flags);
- iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
+ spin_lock(&rxq->lock);
+ iwl_pcie_rxq_inc_wr_ptr(trans);
+ spin_unlock(&rxq->lock);
}
}
@@ -272,16 +268,15 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rx_mem_buffer *rxb;
struct page *page;
- unsigned long flags;
gfp_t gfp_mask = priority;
while (1) {
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
return;
}
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
if (rxq->free_count > RX_LOW_WATERMARK)
gfp_mask |= __GFP_NOWARN;
@@ -310,17 +305,17 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
return;
}
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
__free_pages(page, trans_pcie->rx_page_order);
return;
}
rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
list);
list_del(&rxb->list);
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
BUG_ON(rxb->page);
rxb->page = page;
@@ -331,9 +326,9 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, rxb->page_dma)) {
rxb->page = NULL;
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
list_add(&rxb->list, &rxq->rx_used);
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
__free_pages(page, trans_pcie->rx_page_order);
return;
}
@@ -342,12 +337,12 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
/* and also 256 byte aligned! */
BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
}
}
@@ -378,21 +373,9 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
* Also restock the Rx queue via iwl_pcie_rxq_restock.
* This is called as a scheduled work item (except for during initialization)
*/
-static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
-
- iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
-
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
- iwl_pcie_rxq_restock(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-}
-
-static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
{
- iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
+ iwl_pcie_rxq_alloc_rbs(trans, gfp);
iwl_pcie_rxq_restock(trans);
}
@@ -402,7 +385,7 @@ static void iwl_pcie_rx_replenish_work(struct work_struct *data)
struct iwl_trans_pcie *trans_pcie =
container_of(data, struct iwl_trans_pcie, rx_replenish);
- iwl_pcie_rx_replenish(trans_pcie->trans);
+ iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
}
static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
@@ -488,6 +471,10 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
/* Set interrupt coalescing timer to default (2048 usecs) */
iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+
+ /* W/A for interrupt coalescing bug in 7260 and 3160 */
+ if (trans->cfg->host_interrupt_operation_mode)
+ iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}
static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
@@ -509,7 +496,6 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
int i, err;
- unsigned long flags;
if (!rxq->bd) {
err = iwl_pcie_rx_alloc(trans);
@@ -517,7 +503,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
return err;
}
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
@@ -533,16 +519,15 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
rxq->read = rxq->write = 0;
rxq->write_actual = 0;
memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
- iwl_pcie_rx_replenish(trans);
+ iwl_pcie_rx_replenish(trans, GFP_KERNEL);
iwl_pcie_rx_hw_init(trans, rxq);
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
- rxq->need_update = 1;
- iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_lock(&rxq->lock);
+ iwl_pcie_rxq_inc_wr_ptr(trans);
+ spin_unlock(&rxq->lock);
return 0;
}
@@ -551,7 +536,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
- unsigned long flags;
/*if rxq->bd is NULL, it means that nothing has been allocated,
* exit now */
@@ -562,9 +546,9 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
cancel_work_sync(&trans_pcie->rx_replenish);
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
iwl_pcie_rxq_free_rbs(trans);
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
rxq->bd, rxq->bd_dma);
@@ -587,7 +571,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
- unsigned long flags;
bool page_stolen = false;
int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
u32 offset = 0;
@@ -620,7 +603,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
rxcb._offset, get_cmd_string(trans_pcie, pkt->hdr.cmd),
pkt->hdr.cmd);
- len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ len = iwl_rx_packet_len(pkt);
len += sizeof(u32); /* account for status word */
trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
@@ -689,7 +672,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
/* Reuse the page if possible. For notification packets and
* SKBs that fail to Rx correctly, add them back into the
* rx_free list for reuse later. */
- spin_lock_irqsave(&rxq->lock, flags);
if (rxb->page != NULL) {
rxb->page_dma =
dma_map_page(trans->dev, rxb->page, 0,
@@ -710,7 +692,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
}
} else
list_add_tail(&rxb->list, &rxq->rx_used);
- spin_unlock_irqrestore(&rxq->lock, flags);
}
/*
@@ -725,6 +706,8 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
u32 count = 8;
int total_empty;
+restart:
+ spin_lock(&rxq->lock);
/* uCode's read index (stored in shared DRAM) indicates the last Rx
* buffer that the driver may process (last buffer filled by ucode). */
r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
@@ -759,18 +742,25 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
count++;
if (count >= 8) {
rxq->read = i;
- iwl_pcie_rx_replenish_now(trans);
+ spin_unlock(&rxq->lock);
+ iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
count = 0;
+ goto restart;
}
}
}
/* Backtrack one entry */
rxq->read = i;
+ spin_unlock(&rxq->lock);
+
if (fill_rx)
- iwl_pcie_rx_replenish_now(trans);
+ iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
else
iwl_pcie_rxq_restock(trans);
+
+ if (trans_pcie->napi.poll)
+ napi_gro_flush(&trans_pcie->napi, false);
}
/*
@@ -786,22 +776,103 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
APMS_CLK_VAL_MRB_FUNC_MODE) ||
(iwl_read_prph(trans, APMG_PS_CTRL_REG) &
APMG_PS_CTRL_VAL_RESET_REQ))) {
- clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
iwl_op_mode_wimax_active(trans->op_mode);
wake_up(&trans_pcie->wait_command_queue);
return;
}
iwl_pcie_dump_csr(trans);
- iwl_pcie_dump_fh(trans, NULL);
-
- set_bit(STATUS_FW_ERROR, &trans_pcie->status);
- clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
- wake_up(&trans_pcie->wait_command_queue);
+ iwl_dump_fh(trans, NULL);
local_bh_disable();
- iwl_op_mode_nic_error(trans->op_mode);
+ /* The STATUS_FW_ERROR bit is set in this function. This must happen
+ * before we wake up the command caller, to ensure a proper cleanup. */
+ iwl_trans_fw_error(trans);
local_bh_enable();
+
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ wake_up(&trans_pcie->wait_command_queue);
+}
+
+static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
+{
+ u32 inta;
+
+ lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
+
+ trace_iwlwifi_dev_irq(trans->dev);
+
+ /* Discover which interrupts are active/pending */
+ inta = iwl_read32(trans, CSR_INT);
+
+ /* the thread will service interrupts and re-enable them */
+ return inta;
+}
+
+/* a device (PCI-E) page is 4096 bytes long */
+#define ICT_SHIFT 12
+#define ICT_SIZE (1 << ICT_SHIFT)
+#define ICT_COUNT (ICT_SIZE / sizeof(u32))
+
+/* interrupt handler using ict table, with this interrupt driver will
+ * stop using INTA register to get device's interrupt, reading this register
+ * is expensive, device will write interrupts in ICT dram table, increment
+ * index then will fire interrupt to driver, driver will OR all ICT table
+ * entries from current index up to table entry with 0 value. the result is
+ * the interrupt we need to service, driver will set the entries back to 0 and
+ * set index.
+ */
+static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 inta;
+ u32 val = 0;
+ u32 read;
+
+ trace_iwlwifi_dev_irq(trans->dev);
+
+ /* Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC. */
+ read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+ trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
+ if (!read)
+ return 0;
+
+ /*
+ * Collect all entries up to the first 0, starting from ict_index;
+ * note we already read at ict_index.
+ */
+ do {
+ val |= read;
+ IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
+ trans_pcie->ict_index, read);
+ trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
+ trans_pcie->ict_index =
+ ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
+
+ read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+ trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
+ read);
+ } while (read);
+
+ /* We should not get this value, just ignore it. */
+ if (val == 0xffffffff)
+ val = 0;
+
+ /*
+ * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
+ * (bit 15 before shifting it to 31) to clear when using interrupt
+ * coalescing. fortunately, bits 18 and 19 stay set when this happens
+ * so we use them to decide on the real state of the Rx bit.
+ * In other words, bit 15 is set if bit 18 or bit 19 are set.
+ */
+ if (val & 0xC0000)
+ val |= 0x8000;
+
+ inta = (0xff & val) | ((0xff00 & val) << 16);
+ return inta;
}
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
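
The ICT path folded into iwl_pcie_int_cause_ict() above reassembles CSR_INT from the 16-bit values the device writes to the ICT table: the low byte maps to CSR_INT bits 0-7, the second byte to bits 24-31, and bits 18/19 of the accumulated value stand in for the Rx bit that the coalescing hardware bug can clear. A userspace sketch of that bit handling with concrete values (hypothetical helper, not driver code):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t ict_to_inta(uint32_t val)
    {
        /* h/w bug w/a: bits 18/19 imply the Rx bit (bit 15) was set */
        if (val & 0xC0000)
            val |= 0x8000;

        /* low byte stays put; the second byte expands to bits 24-31 */
        return (0xff & val) | ((0xff00 & val) << 16);
    }

    int main(void)
    {
        assert(ict_to_inta(0x8000) == 0x80000000); /* bit 15 -> CSR_INT bit 31 */
        assert(ict_to_inta(0x40000) & 0x80000000); /* bit 18 restores lost Rx bit */
        return 0;
    }
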
@@ -811,12 +882,60 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
u32 inta = 0;
u32 handled = 0;
- unsigned long flags;
- u32 i;
lock_map_acquire(&trans->sync_cmd_lockdep_map);
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
+
+ /* dram interrupt table not set yet,
+ * use legacy interrupt.
+ */
+ if (likely(trans_pcie->use_ict))
+ inta = iwl_pcie_int_cause_ict(trans);
+ else
+ inta = iwl_pcie_int_cause_non_ict(trans);
+
+ if (iwl_have_debug_level(IWL_DL_ISR)) {
+ IWL_DEBUG_ISR(trans,
+ "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
+ inta, trans_pcie->inta_mask,
+ iwl_read32(trans, CSR_INT_MASK),
+ iwl_read32(trans, CSR_FH_INT_STATUS));
+ if (inta & (~trans_pcie->inta_mask))
+ IWL_DEBUG_ISR(trans,
+ "We got a masked interrupt (0x%08x)\n",
+ inta & (~trans_pcie->inta_mask));
+ }
+
+ inta &= trans_pcie->inta_mask;
+
+ /*
+ * Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC.
+ */
+ if (unlikely(!inta)) {
+ IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
+ /*
+ * Re-enable interrupts here since we don't
+ * have anything to service
+ */
+ if (test_bit(STATUS_INT_ENABLED, &trans->status))
+ iwl_enable_interrupts(trans);
+ spin_unlock(&trans_pcie->irq_lock);
+ lock_map_release(&trans->sync_cmd_lockdep_map);
+ return IRQ_NONE;
+ }
+
+ if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
+ /*
+ * Hardware disappeared. It might have
+ * already raised an interrupt.
+ */
+ IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
+ spin_unlock(&trans_pcie->irq_lock);
+ goto out;
+ }
/* Ack/clear/reset pending uCode interrupts.
* Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
@@ -829,19 +948,13 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
* hardware bugs here by ACKing all the possible interrupts so that
* interrupt coalescing can still be achieved.
*/
- iwl_write32(trans, CSR_INT,
- trans_pcie->inta | ~trans_pcie->inta_mask);
-
- inta = trans_pcie->inta;
+ iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
if (iwl_have_debug_level(IWL_DL_ISR))
IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
inta, iwl_read32(trans, CSR_INT_MASK));
- /* saved interrupt in inta variable now we can reset trans_pcie->inta */
- trans_pcie->inta = 0;
-
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
/* Now service all interrupt bits discovered above. */
if (inta & CSR_INT_BIT_HW_ERR) {
@@ -886,16 +999,16 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
isr_stats->rfkill++;
- iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
if (hw_rfkill) {
- set_bit(STATUS_RFKILL, &trans_pcie->status);
- if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
- &trans_pcie->status))
+ set_bit(STATUS_RFKILL, &trans->status);
+ if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status))
IWL_DEBUG_RF_KILL(trans,
"Rfkill while SYNC HCMD in flight\n");
wake_up(&trans_pcie->wait_command_queue);
} else {
- clear_bit(STATUS_RFKILL, &trans_pcie->status);
+ clear_bit(STATUS_RFKILL, &trans->status);
}
handled |= CSR_INT_BIT_RF_KILL;
@@ -920,9 +1033,8 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
/* uCode wakes up after power-down sleep */
if (inta & CSR_INT_BIT_WAKEUP) {
IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
- iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
- for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
- iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);
+ iwl_pcie_rxq_check_wrptr(trans);
+ iwl_pcie_txq_check_wrptrs(trans);
isr_stats->wakeup++;
@@ -960,8 +1072,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
iwl_write8(trans, CSR_INT_PERIODIC_REG,
CSR_INT_PERIODIC_DIS);
- iwl_pcie_rx_handle(trans);
-
/*
* Enable periodic interrupt in 8 msec only if we received
* real RX interrupt (instead of just periodic int), to catch
@@ -974,6 +1084,10 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
CSR_INT_PERIODIC_ENA);
isr_stats->rx++;
+
+ local_bh_disable();
+ iwl_pcie_rx_handle(trans);
+ local_bh_enable();
}
/* This "Tx" DMA channel is used only for loading uCode */
@@ -999,7 +1113,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
/* Re-enable all interrupts */
/* only Re-enable if disabled by irq */
- if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
+ if (test_bit(STATUS_INT_ENABLED, &trans->status))
iwl_enable_interrupts(trans);
/* Re-enable RF_KILL if it occurred */
else if (handled & CSR_INT_BIT_RF_KILL)
@@ -1016,11 +1130,6 @@ out:
*
******************************************************************************/
-/* a device (PCI-E) page is 4096 bytes long */
-#define ICT_SHIFT 12
-#define ICT_SIZE (1 << ICT_SHIFT)
-#define ICT_COUNT (ICT_SIZE / sizeof(u32))
-
/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
@@ -1045,7 +1154,7 @@ int iwl_pcie_alloc_ict(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
trans_pcie->ict_tbl =
- dma_alloc_coherent(trans->dev, ICT_SIZE,
+ dma_zalloc_coherent(trans->dev, ICT_SIZE,
&trans_pcie->ict_tbl_dma,
GFP_KERNEL);
if (!trans_pcie->ict_tbl)
@@ -1057,17 +1166,10 @@ int iwl_pcie_alloc_ict(struct iwl_trans *trans)
return -EINVAL;
}
- IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
- (unsigned long long)trans_pcie->ict_tbl_dma);
-
- IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);
-
- /* reset table and index to all 0 */
- memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
- trans_pcie->ict_index = 0;
+ IWL_DEBUG_ISR(trans, "ict dma addr %Lx ict vir addr %p\n",
+ (unsigned long long)trans_pcie->ict_tbl_dma,
+ trans_pcie->ict_tbl);
- /* add periodic RX interrupt */
- trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
return 0;
}
@@ -1078,12 +1180,11 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 val;
- unsigned long flags;
if (!trans_pcie->ict_tbl)
return;
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
@@ -1100,124 +1201,26 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
trans_pcie->ict_index = 0;
iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
iwl_enable_interrupts(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
}
/* Device is going down disable ict interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
trans_pcie->use_ict = false;
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
}
-/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
-static irqreturn_t iwl_pcie_isr(int irq, void *data)
-{
- struct iwl_trans *trans = data;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u32 inta, inta_mask;
-
- lockdep_assert_held(&trans_pcie->irq_lock);
-
- trace_iwlwifi_dev_irq(trans->dev);
-
- /* Disable (but don't clear!) interrupts here to avoid
- * back-to-back ISRs and sporadic interrupts from our NIC.
- * If we have something to service, the irq thread will re-enable ints.
- * If we *don't* have something, we'll re-enable before leaving here. */
- inta_mask = iwl_read32(trans, CSR_INT_MASK);
- iwl_write32(trans, CSR_INT_MASK, 0x00000000);
-
- /* Discover which interrupts are active/pending */
- inta = iwl_read32(trans, CSR_INT);
-
- if (inta & (~inta_mask)) {
- IWL_DEBUG_ISR(trans,
- "We got a masked interrupt (0x%08x)...Ack and ignore\n",
- inta & (~inta_mask));
- iwl_write32(trans, CSR_INT, inta & (~inta_mask));
- inta &= inta_mask;
- }
-
- /* Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC. */
- if (!inta) {
- IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
- goto none;
- }
-
- if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
- /* Hardware disappeared. It might have already raised
- * an interrupt */
- IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
- return IRQ_HANDLED;
- }
-
- if (iwl_have_debug_level(IWL_DL_ISR))
- IWL_DEBUG_ISR(trans,
- "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
- inta, inta_mask,
- iwl_read32(trans, CSR_FH_INT_STATUS));
-
- trans_pcie->inta |= inta;
- /* the thread will service interrupts and re-enable them */
- if (likely(inta))
- return IRQ_WAKE_THREAD;
- else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
- !trans_pcie->inta)
- iwl_enable_interrupts(trans);
- return IRQ_HANDLED;
-
-none:
- /* re-enable interrupts here since we don't have anything to service. */
- /* only Re-enable if disabled by irq and no schedules tasklet. */
- if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
- !trans_pcie->inta)
- iwl_enable_interrupts(trans);
-
- return IRQ_NONE;
-}
-
-/* interrupt handler using ict table, with this interrupt driver will
- * stop using INTA register to get device's interrupt, reading this register
- * is expensive, device will write interrupts in ICT dram table, increment
- * index then will fire interrupt to driver, driver will OR all ICT table
- * entries from current index up to table entry with 0 value. the result is
- * the interrupt we need to service, driver will set the entries back to 0 and
- * set index.
- */
-irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
+irqreturn_t iwl_pcie_isr(int irq, void *data)
{
struct iwl_trans *trans = data;
- struct iwl_trans_pcie *trans_pcie;
- u32 inta;
- u32 val = 0;
- u32 read;
- unsigned long flags;
if (!trans)
return IRQ_NONE;
- trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
-
- /* dram interrupt table not set yet,
- * use legacy interrupt.
- */
- if (unlikely(!trans_pcie->use_ict)) {
- irqreturn_t ret = iwl_pcie_isr(irq, data);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- return ret;
- }
-
- trace_iwlwifi_dev_irq(trans->dev);
-
/* Disable (but don't clear!) interrupts here to avoid
* back-to-back ISRs and sporadic interrupts from our NIC.
* If we have something to service, the tasklet will re-enable ints.
@@ -1225,81 +1228,5 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
*/
iwl_write32(trans, CSR_INT_MASK, 0x00000000);
- /* Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC. */
- read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
- trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
- if (!read) {
- IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
- goto none;
- }
-
- /*
- * Collect all entries up to the first 0, starting from ict_index;
- * note we already read at ict_index.
- */
- do {
- val |= read;
- IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
- trans_pcie->ict_index, read);
- trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
- trans_pcie->ict_index =
- iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);
-
- read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
- trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
- read);
- } while (read);
-
- /* We should not get this value, just ignore it. */
- if (val == 0xffffffff)
- val = 0;
-
- /*
- * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
- * (bit 15 before shifting it to 31) to clear when using interrupt
- * coalescing. fortunately, bits 18 and 19 stay set when this happens
- * so we use them to decide on the real state of the Rx bit.
- * In order words, bit 15 is set if bit 18 or bit 19 are set.
- */
- if (val & 0xC0000)
- val |= 0x8000;
-
- inta = (0xff & val) | ((0xff00 & val) << 16);
- IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled(sw) 0x%08x ict 0x%08x\n",
- inta, trans_pcie->inta_mask, val);
- if (iwl_have_debug_level(IWL_DL_ISR))
- IWL_DEBUG_ISR(trans, "enabled(hw) 0x%08x\n",
- iwl_read32(trans, CSR_INT_MASK));
-
- inta &= trans_pcie->inta_mask;
- trans_pcie->inta |= inta;
-
- /* iwl_pcie_tasklet() will service interrupts and re-enable them */
- if (likely(inta)) {
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- return IRQ_WAKE_THREAD;
- } else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
- !trans_pcie->inta) {
- /* Allow interrupt if was disabled by this handler and
- * no tasklet was schedules, We should not enable interrupt,
- * tasklet will enable it.
- */
- iwl_enable_interrupts(trans);
- }
-
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- return IRQ_HANDLED;
-
- none:
- /* re-enable interrupts here since we don't have anything to service.
- * only Re-enable if disabled by irq.
- */
- if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
- !trans_pcie->inta)
- iwl_enable_interrupts(trans);
-
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- return IRQ_NONE;
+ return IRQ_WAKE_THREAD;
}
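
The rewritten iwl_rxq_space() near the top of this file is the classic one-slot-reserved ring-buffer formula: reserving one slot keeps read == write unambiguous (always "empty", never "full"), and with a power-of-2 size the mask makes the subtraction exact even when it goes negative. A userspace sketch of the same formula (not driver code):

    #include <assert.h>

    #define RX_QUEUE_SIZE 256 /* power of 2, enforced by the BUILD_BUG_ON above */

    static int rxq_space(int read, int write)
    {
        return (read - write - 1) & (RX_QUEUE_SIZE - 1);
    }

    int main(void)
    {
        assert(rxq_space(0, 0) == RX_QUEUE_SIZE - 1); /* empty: max free slots */
        assert(rxq_space(10, 9) == 0);                /* full: write caught read */
        assert(rxq_space(0, RX_QUEUE_SIZE - 1) == 0); /* full across the wrap */
        return 0;
    }
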
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 826c15602c4..788085bc65d 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -73,33 +73,21 @@
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-agn-hw.h"
+#include "iwl-fw-error-dump.h"
#include "internal.h"
-static void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
- u32 reg, u32 mask, u32 value)
+static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
- u32 v;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- WARN_ON_ONCE(value & ~mask);
-#endif
-
- v = iwl_read32(trans, reg);
- v &= ~mask;
- v |= value;
- iwl_write32(trans, reg, v);
+ iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
+ ((reg & 0x0000ffff) | (2 << 28)));
+ return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}
-static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
- u32 reg, u32 mask)
+static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
- __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
-}
-
-static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
- u32 reg, u32 mask)
-{
- __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
+ iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
+ iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
+ ((reg & 0x0000ffff) | (3 << 28)));
}
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
@@ -150,7 +138,6 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans)
*/
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret = 0;
IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
@@ -160,8 +147,9 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
*/
/* Disable L0S exit timer (platform NMI Work/Around) */
- iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
- CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
/*
* Disable L0s without affecting L1;
@@ -206,26 +194,165 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
goto out;
}
+ if (trans->cfg->host_interrupt_operation_mode) {
+ /*
+ * This is a bit of an abuse - this is needed for 7260 / 3160
+ * only, hence the check of host_interrupt_operation_mode even
+ * though the workaround is not otherwise related to it.
+ *
+ * Enable the oscillator to count wake up time for L1 exit. This
+ * consumes slightly more power (100uA) - but allows to be sure
+ * that we wake up from L1 on time.
+ *
+ * This looks weird: read twice the same register, discard the
+ * value, set a bit, and yet again, read that same register
+ * just to discard the value. But that's the way the hardware
+ * seems to like it.
+ */
+ iwl_read_prph(trans, OSC_CLK);
+ iwl_read_prph(trans, OSC_CLK);
+ iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
+ iwl_read_prph(trans, OSC_CLK);
+ iwl_read_prph(trans, OSC_CLK);
+ }
+
/*
* Enable DMA clock and wait for it to stabilize.
*
- * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
- * do not disable clocks. This preserves any hardware bits already
- * set by default in "CLK_CTRL_REG" after reset.
+ * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
+ * bits do not disable clocks. This preserves any hardware
+ * bits already set by default in "CLK_CTRL_REG" after reset.
*/
- iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
- udelay(20);
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ iwl_write_prph(trans, APMG_CLK_EN_REG,
+ APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(20);
- /* Disable L1-Active */
- iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
- APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+ /* Disable L1-Active */
+ iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
- set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
+ /* Clear the interrupt in APMG if the NIC is in RFKILL */
+ iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
+ APMG_RTC_INT_STT_RFKILL);
+ }
+
+ set_bit(STATUS_DEVICE_ENABLED, &trans->status);
out:
return ret;
}
+/*
+ * Enable LP XTAL to avoid a HW bug where the device may consume excessive
+ * power if the FW is not loaded after device reset. LP XTAL is disabled by
+ * default after device HW reset. Do it only if the XTAL is fed by an
+ * internal source.
+ * Configure device's "persistence" mode to avoid resetting XTAL again when
+ * SHRD_HW_RST occurs in S3.
+ */
+static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
+{
+ int ret;
+ u32 apmg_gp1_reg;
+ u32 apmg_xtal_cfg_reg;
+ u32 dl_cfg_reg;
+
+ /* Force XTAL ON */
+ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+
+ /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+ udelay(10);
+
+ /*
+ * Set "initialization complete" bit to move adapter from
+ * D0U* --> D0A* (powered-up active) state.
+ */
+ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /*
+ * Wait for clock stabilization; once stabilized, access to
+ * device-internal resources is possible.
+ */
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ 25000);
+ if (WARN_ON(ret < 0)) {
+ IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
+ /* Release XTAL ON request */
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+ return;
+ }
+
+ /*
+ * Clear "disable persistence" to avoid LP XTAL resetting when
+ * SHRD_HW_RST is applied in S3.
+ */
+ iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_PERSIST_DIS);
+
+ /*
+ * Force APMG XTAL to be active to prevent its disabling by HW
+ * caused by APMG idle state.
+ */
+ apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
+ SHR_APMG_XTAL_CFG_REG);
+ iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
+ apmg_xtal_cfg_reg |
+ SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
+
+ /*
+ * Reset entire device again - do controller reset (results in
+ * SHRD_HW_RST). Turn MAC off before proceeding.
+ */
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+ udelay(10);
+
+ /* Enable LP XTAL by indirect access through CSR */
+ apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
+ iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
+ SHR_APMG_GP1_WF_XTAL_LP_EN |
+ SHR_APMG_GP1_CHICKEN_BIT_SELECT);
+
+ /* Clear delay line clock power up */
+ dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
+ iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
+ ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);
+
+ /*
+ * Enable persistence mode to avoid LP XTAL resetting when
+ * SHRD_HW_RST is applied in S3.
+ */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
+
+ /*
+ * Clear "initialization complete" bit to move adapter from
+ * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+ */
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /* Activates XTAL resources monitor */
+ __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
+ CSR_MONITOR_XTAL_RESOURCES);
+
+ /* Release XTAL ON request */
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+ udelay(10);
+
+ /* Release APMG XTAL */
+ iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
+ apmg_xtal_cfg_reg &
+ ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
+}
+
static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
int ret = 0;
@@ -246,14 +373,18 @@ static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
static void iwl_pcie_apm_stop(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
- clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
+ clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
/* Stop device's DMA activity */
iwl_pcie_apm_stop_master(trans);
+ if (trans->cfg->lp_xtal_workaround) {
+ iwl_pcie_apm_lp_xtal_enable(trans);
+ return;
+ }
+
/* Reset the entire device */
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
@@ -270,18 +401,15 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans)
static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
/* nic_init */
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_pcie_apm_init(trans);
- /* Set interrupt coalescing calibration timer to default (512 usecs) */
- iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
-
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
- iwl_pcie_set_pwr(trans, false);
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ iwl_pcie_set_pwr(trans, false);
iwl_op_mode_nic_config(trans->op_mode);
@@ -326,6 +454,7 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
int ret;
int t = 0;
+ int iter;
IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
@@ -334,18 +463,23 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
if (ret >= 0)
return 0;
- /* If HW is not ready, prepare the conditions to check again */
- iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_PREPARE);
+ for (iter = 0; iter < 10; iter++) {
+ /* If HW is not ready, prepare the conditions to check again */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_PREPARE);
- do {
- ret = iwl_pcie_set_hw_ready(trans);
- if (ret >= 0)
- return 0;
+ do {
+ ret = iwl_pcie_set_hw_ready(trans);
+ if (ret >= 0)
+ return 0;
- usleep_range(200, 1000);
- t += 200;
- } while (t < 150000);
+ usleep_range(200, 1000);
+ t += 200;
+ } while (t < 150000);
+ msleep(25);
+ }
+
+ IWL_DEBUG_INFO(trans, "got NIC after %d iterations\n", iter);
return ret;
}
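
The timing budget of the new loop is easy to check by hand: the inner poll advances t in 200 us steps up to 150,000 us, and each of the up to 10 outer iterations adds a 25 ms pause, so the driver waits roughly 1.75 s at minimum in the worst case before giving up (more in practice, since usleep_range() may sleep up to 1 ms per step while t only counts 200 us). A tiny standalone check of that lower bound, with the numbers taken from the hunk above:

#include <stdio.h>

int main(void)
{
        const int inner_budget_us = 150000; /* while (t < 150000), 200 us steps */
        const int outer_iters = 10;         /* for (iter = 0; iter < 10; ...) */
        const int pause_ms = 25;            /* msleep(25) between attempts */

        int worst_ms = outer_iters * (inner_budget_us / 1000 + pause_ms);
        printf("minimum worst-case prepare time: ~%d ms\n", worst_ms); /* ~1750 */
        return 0;
}
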
@@ -443,22 +577,181 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
return ret;
}
-static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
- const struct fw_img *image)
+static int iwl_pcie_load_cpu_secured_sections(struct iwl_trans *trans,
+ const struct fw_img *image,
+ int cpu,
+ int *first_ucode_section)
{
+ int shift_param;
int i, ret = 0;
+ u32 last_read_idx = 0;
+
+ if (cpu == 1) {
+ shift_param = 0;
+ *first_ucode_section = 0;
+ } else {
+ shift_param = 16;
+ (*first_ucode_section)++;
+ }
- for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
- if (!image->sec[i].data)
+ for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+ last_read_idx = i;
+
+ if (!image->sec[i].data ||
+ image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+ IWL_DEBUG_FW(trans,
+ "Break since Data not valid or Empty section, sec = %d\n",
+ i);
break;
+ }
+
+ if (i == (*first_ucode_section) + 1)
+ /* set CPU to started */
+ iwl_set_bits_prph(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ LMPM_CPU_HDRS_LOADING_COMPLETED
+ << shift_param);
ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
if (ret)
return ret;
}
+ /* image loading complete */
+ iwl_set_bits_prph(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ LMPM_CPU_UCODE_LOADING_COMPLETED << shift_param);
- /* Remove all resets to allow NIC to operate */
- iwl_write32(trans, CSR_RESET, 0);
+ *first_ucode_section = last_read_idx;
+
+ return 0;
+}
+
+static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
+ const struct fw_img *image,
+ int cpu,
+ int *first_ucode_section)
+{
+ int shift_param;
+ int i, ret = 0;
+ u32 last_read_idx = 0;
+
+ if (cpu == 1) {
+ shift_param = 0;
+ *first_ucode_section = 0;
+ } else {
+ shift_param = 16;
+ (*first_ucode_section)++;
+ }
+
+ for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+ last_read_idx = i;
+
+ if (!image->sec[i].data ||
+ image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+ IWL_DEBUG_FW(trans,
+ "Break since Data not valid or Empty section, sec = %d\n",
+ i);
+ break;
+ }
+
+ ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+ if (ret)
+ return ret;
+ }
+
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ iwl_set_bits_prph(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ (LMPM_CPU_UCODE_LOADING_COMPLETED |
+ LMPM_CPU_HDRS_LOADING_COMPLETED |
+ LMPM_CPU_UCODE_LOADING_STARTED) <<
+ shift_param);
+
+ *first_ucode_section = last_read_idx;
+
+ return 0;
+}
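
Both loaders rely on the same convention: CPU1's load-status bits live in the low half of CSR_UCODE_LOAD_STATUS_ADDR and CPU2's in the high half, which is all shift_param encodes. A standalone illustration of that layout - the flag values below are placeholders, not the real LMPM_* definitions:

#include <stdio.h>

/* Placeholder flags standing in for the LMPM_CPU_* bits, chosen only
 * to show the shift-by-16 convention used by the loaders above. */
#define UCODE_LOADING_COMPLETED 0x1
#define HDRS_LOADING_COMPLETED  0x2

static unsigned int cpu_status_bits(int cpu, unsigned int flags)
{
        int shift = (cpu == 1) ? 0 : 16; /* CPU1: low half, CPU2: high half */

        return flags << shift;
}

int main(void)
{
        printf("cpu1: 0x%08x\n", cpu_status_bits(1, UCODE_LOADING_COMPLETED));
        printf("cpu2: 0x%08x\n", cpu_status_bits(2, UCODE_LOADING_COMPLETED));
        return 0;
}
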
+
+static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
+ const struct fw_img *image)
+{
+ int ret = 0;
+ int first_ucode_section;
+
+ IWL_DEBUG_FW(trans,
+ "working with %s image\n",
+ image->is_secure ? "Secured" : "Non Secured");
+ IWL_DEBUG_FW(trans,
+ "working with %s CPU\n",
+ image->is_dual_cpus ? "Dual" : "Single");
+
+ /* configure the ucode to be ready to get the secured image */
+ if (image->is_secure) {
+ /* set secure boot inspector addresses */
+ iwl_write_prph(trans,
+ LMPM_SECURE_INSPECTOR_CODE_ADDR,
+ LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE);
+
+ iwl_write_prph(trans,
+ LMPM_SECURE_INSPECTOR_DATA_ADDR,
+ LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE);
+
+ /* set CPU1 header address */
+ iwl_write_prph(trans,
+ LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR,
+ LMPM_SECURE_CPU1_HDR_MEM_SPACE);
+
+ /* load to FW the binary Secured sections of CPU1 */
+ ret = iwl_pcie_load_cpu_secured_sections(trans, image, 1,
+ &first_ucode_section);
+ if (ret)
+ return ret;
+
+ } else {
+ /* load to FW the binary Non secured sections of CPU1 */
+ ret = iwl_pcie_load_cpu_sections(trans, image, 1,
+ &first_ucode_section);
+ if (ret)
+ return ret;
+ }
+
+ if (image->is_dual_cpus) {
+ /* set CPU2 header address */
+ iwl_write_prph(trans,
+ LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
+ LMPM_SECURE_CPU2_HDR_MEM_SPACE);
+
+ /* load to FW the binary sections of CPU2 */
+ if (image->is_secure)
+ ret = iwl_pcie_load_cpu_secured_sections(
+ trans, image, 2,
+ &first_ucode_section);
+ else
+ ret = iwl_pcie_load_cpu_sections(trans, image, 2,
+ &first_ucode_section);
+ if (ret)
+ return ret;
+ }
+
+ /* release CPU reset */
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
+ else
+ iwl_write32(trans, CSR_RESET, 0);
+
+ if (image->is_secure) {
+ /* wait for image verification to complete */
+ ret = iwl_poll_prph_bit(trans,
+ LMPM_SECURE_BOOT_CPU1_STATUS_ADDR,
+ LMPM_SECURE_BOOT_STATUS_SUCCESS,
+ LMPM_SECURE_BOOT_STATUS_SUCCESS,
+ LMPM_SECURE_TIME_OUT);
+
+ if (ret < 0) {
+ IWL_ERR(trans, "Time out on secure boot process\n");
+ return ret;
+ }
+ }
return 0;
}
@@ -466,7 +759,6 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
const struct fw_img *fw, bool run_in_rfkill)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
bool hw_rfkill;
@@ -476,17 +768,15 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
return -EIO;
}
- clear_bit(STATUS_FW_ERROR, &trans_pcie->status);
-
iwl_enable_rfkill_int(trans);
/* If platform's RF_KILL switch is NOT set to KILL */
hw_rfkill = iwl_is_rfkill_set(trans);
if (hw_rfkill)
- set_bit(STATUS_RFKILL, &trans_pcie->status);
+ set_bit(STATUS_RFKILL, &trans->status);
else
- clear_bit(STATUS_RFKILL, &trans_pcie->status);
- iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+ clear_bit(STATUS_RFKILL, &trans->status);
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
if (hw_rfkill && !run_in_rfkill)
return -ERFKILL;
@@ -524,12 +814,14 @@ static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
+ bool hw_rfkill, was_hw_rfkill;
+
+ was_hw_rfkill = iwl_is_rfkill_set(trans);
/* tell the device to stop sending interrupts */
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
/* device going down, Stop using ICT table */
iwl_pcie_disable_ict(trans);
@@ -541,7 +833,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
* restart. So don't process again if the device is
* already dead.
*/
- if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
+ if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
iwl_pcie_tx_stop(trans);
iwl_pcie_rx_stop(trans);
@@ -561,21 +853,51 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
/* Upon stop, the APM issues an interrupt if HW RF kill is set.
* Clean again the interrupt here
*/
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-
- iwl_enable_rfkill_int(trans);
+ spin_unlock(&trans_pcie->irq_lock);
/* stop and reset the on-board processor */
iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
/* clear all status bits */
- clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
- clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
- clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
- clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
- clear_bit(STATUS_RFKILL, &trans_pcie->status);
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ clear_bit(STATUS_INT_ENABLED, &trans->status);
+ clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
+ clear_bit(STATUS_TPOWER_PMI, &trans->status);
+ clear_bit(STATUS_RFKILL, &trans->status);
+
+ /*
+ * Even if we stop the HW, we still want the RF kill
+ * interrupt
+ */
+ iwl_enable_rfkill_int(trans);
+
+ /*
+ * Check again since the RF kill state may have changed while
+ * all the interrupts were disabled, in this case we couldn't
+ * receive the RF kill interrupt and update the state in the
+ * op_mode.
+ * Don't call the op_mode if the rfkill state hasn't changed.
+ * This allows the op_mode to call stop_device from the rfkill
+ * notification without endless recursion. Under very rare
+ * circumstances, we might have a small recursion if the rfkill
+ * state changed exactly now while we were called from stop_device.
+ * This is very unlikely but can happen and is supported.
+ */
+ hw_rfkill = iwl_is_rfkill_set(trans);
+ if (hw_rfkill)
+ set_bit(STATUS_RFKILL, &trans->status);
+ else
+ clear_bit(STATUS_RFKILL, &trans->status);
+ if (hw_rfkill != was_hw_rfkill)
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+}
+
+void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
+{
+ if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
+ iwl_trans_pcie_stop_device(trans);
}
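
The hw_rfkill != was_hw_rfkill test above is what keeps the op_mode's rf_kill -> stop_device -> rf_kill path from looping: stop_device only notifies the op_mode when the switch actually moved while interrupts were off. A minimal standalone model of that guard - the globals and callbacks are stand-ins for the trans/op_mode plumbing, not the real API:

#include <stdbool.h>
#include <stdio.h>

static bool hw_rfkill;                  /* stand-in for iwl_is_rfkill_set() */
static void op_mode_rf_kill(bool state);

static void stop_device(void)
{
        bool was = hw_rfkill;
        /* ... device shutdown would happen here, interrupts off ... */
        bool now = hw_rfkill;           /* re-read the switch afterwards */

        if (now != was)                 /* unchanged state: no callback,  */
                op_mode_rf_kill(now);   /* hence no recursion back to us  */
}

static void op_mode_rf_kill(bool state)
{
        printf("rfkill -> %d\n", state);
        if (state)
                stop_device();          /* recursion bottoms out quickly */
}

int main(void)
{
        hw_rfkill = true;       /* switch asserted */
        op_mode_rf_kill(true);  /* stop_device sees no further change */
        return 0;
}
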
static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
@@ -660,7 +982,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool hw_rfkill;
int err;
@@ -670,6 +991,11 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
return err;
}
+ /* Reset the entire device */
+ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+ usleep_range(10, 15);
+
iwl_pcie_apm_init(trans);
/* From now on, the op_mode will be kept updated about RF kill state */
@@ -677,53 +1003,30 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
hw_rfkill = iwl_is_rfkill_set(trans);
if (hw_rfkill)
- set_bit(STATUS_RFKILL, &trans_pcie->status);
+ set_bit(STATUS_RFKILL, &trans->status);
else
- clear_bit(STATUS_RFKILL, &trans_pcie->status);
- iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+ clear_bit(STATUS_RFKILL, &trans->status);
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
return 0;
}
-static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
- bool op_mode_leaving)
+static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- bool hw_rfkill;
- unsigned long flags;
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ /* disable interrupts - don't enable HW RF kill interrupt */
+ spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
iwl_pcie_apm_stop(trans);
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
iwl_pcie_disable_ict(trans);
-
- if (!op_mode_leaving) {
- /*
- * Even if we stop the HW, we still want the RF kill
- * interrupt
- */
- iwl_enable_rfkill_int(trans);
-
- /*
- * Check again since the RF kill state may have changed while
- * all the interrupts were disabled, in this case we couldn't
- * receive the RF kill interrupt and update the state in the
- * op_mode.
- */
- hw_rfkill = iwl_is_rfkill_set(trans);
- if (hw_rfkill)
- set_bit(STATUS_RFKILL, &trans_pcie->status);
- else
- clear_bit(STATUS_RFKILL, &trans_pcie->status);
- iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
- }
}
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
@@ -756,6 +1059,12 @@ static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
+static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
+{
+ WARN_ON(1);
+ return 0;
+}
+
static void iwl_trans_pcie_configure(struct iwl_trans *trans,
const struct iwl_trans_config *trans_cfg)
{
@@ -782,6 +1091,18 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans_pcie->command_names = trans_cfg->command_names;
trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
+
+ /* Initialize NAPI here - it should be before registering to mac80211
+ * in the opmode but after the HW struct is allocated.
+ * As this function may be called again in some corner cases, don't
+ * do anything if NAPI was already initialized.
+ */
+ if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
+ init_dummy_netdev(&trans_pcie->napi_dev);
+ iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
+ &trans_pcie->napi_dev,
+ iwl_pcie_dummy_napi_poll, 64);
+ }
}
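
A note on the dummy netdev: NAPI instances must hang off a struct net_device, which a mac80211-based driver does not otherwise have, so a never-registered dummy device is created just to host the napi_struct; the poll callback is never supposed to run (the op_mode drives the real polling), which is why iwl_pcie_dummy_napi_poll() only warns. A hedged sketch of the bare pattern in kernel context, with error handling and the op_mode hook omitted:

#include <linux/netdevice.h>

static struct net_device dummy_dev; /* never registered, carries no traffic */
static struct napi_struct napi;

static int never_polled(struct napi_struct *n, int budget)
{
        WARN_ON(1);             /* someone else polls for us; must not run */
        return 0;
}

static void attach_napi(void)
{
        init_dummy_netdev(&dummy_dev);
        netif_napi_add(&dummy_dev, &napi, never_polled, 64);
}
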
void iwl_trans_pcie_free(struct iwl_trans *trans)
@@ -802,37 +1123,19 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
pci_disable_device(trans_pcie->pci_dev);
kmem_cache_destroy(trans->dev_cmd_pool);
+ if (trans_pcie->napi.poll)
+ netif_napi_del(&trans_pcie->napi);
+
kfree(trans);
}
static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
if (state)
- set_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
+ set_bit(STATUS_TPOWER_PMI, &trans->status);
else
- clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
-{
- return 0;
-}
-
-static int iwl_trans_pcie_resume(struct iwl_trans *trans)
-{
- bool hw_rfkill;
-
- iwl_enable_rfkill_int(trans);
-
- hw_rfkill = iwl_is_rfkill_set(trans);
- iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
-
- return 0;
+ clear_bit(STATUS_TPOWER_PMI, &trans->status);
}
-#endif /* CONFIG_PM_SLEEP */
static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
unsigned long *flags)
@@ -842,6 +1145,9 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
+ if (trans_pcie->cmd_in_flight)
+ goto out;
+
/* this bit wakes up the NIC */
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
@@ -881,6 +1187,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
}
}
+out:
/*
* Fool sparse by faking we release the lock - sparse will
* track nic_access anyway.
@@ -902,6 +1209,9 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
*/
__acquire(&trans_pcie->reg_lock);
+ if (trans_pcie->cmd_in_flight)
+ goto out;
+
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/*
@@ -911,6 +1221,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
* scheduled on different CPUs (after we drop reg_lock).
*/
mmiowb();
+out:
spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
}
@@ -953,7 +1264,7 @@ static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
#define IWL_FLUSH_WAIT_MS 2000
-static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
+static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq;
@@ -966,13 +1277,31 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
/* waiting for all the tx frames complete might take a while */
for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+ u8 wr_ptr;
+
if (cnt == trans_pcie->cmd_queue)
continue;
+ if (!test_bit(cnt, trans_pcie->queue_used))
+ continue;
+ if (!(BIT(cnt) & txq_bm))
+ continue;
+
+ IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
txq = &trans_pcie->txq[cnt];
q = &txq->q;
- while (q->read_ptr != q->write_ptr && !time_after(jiffies,
- now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
+ wr_ptr = ACCESS_ONCE(q->write_ptr);
+
+ while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
+ !time_after(jiffies,
+ now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
+ u8 write_ptr = ACCESS_ONCE(q->write_ptr);
+
+ if (WARN_ONCE(wr_ptr != write_ptr,
+ "WR pointer moved while flushing %d -> %d\n",
+ wr_ptr, write_ptr))
+ return -ETIMEDOUT;
msleep(1);
+ }
if (q->read_ptr != q->write_ptr) {
IWL_ERR(trans,
@@ -980,6 +1309,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
ret = -ETIMEDOUT;
break;
}
+ IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
}
if (!ret)
@@ -1014,8 +1344,8 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
IWL_ERR(trans,
"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
cnt, active ? "" : "in", fifo, tbl_dw,
- iwl_read_prph(trans,
- SCD_QUEUE_RDPTR(cnt)) & (txq->q.n_bd - 1),
+ iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
+ (TFD_QUEUE_SIZE_MAX - 1),
iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
}
@@ -1033,71 +1363,6 @@ static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
-static const char *get_fh_string(int cmd)
-{
-#define IWL_CMD(x) case x: return #x
- switch (cmd) {
- IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
- IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
- IWL_CMD(FH_RSCSR_CHNL0_WPTR);
- IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
- IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
- IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
- IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
- IWL_CMD(FH_TSSR_TX_STATUS_REG);
- IWL_CMD(FH_TSSR_TX_ERROR_REG);
- default:
- return "UNKNOWN";
- }
-#undef IWL_CMD
-}
-
-int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf)
-{
- int i;
- static const u32 fh_tbl[] = {
- FH_RSCSR_CHNL0_STTS_WPTR_REG,
- FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- FH_RSCSR_CHNL0_WPTR,
- FH_MEM_RCSR_CHNL0_CONFIG_REG,
- FH_MEM_RSSR_SHARED_CTRL_REG,
- FH_MEM_RSSR_RX_STATUS_REG,
- FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
- FH_TSSR_TX_STATUS_REG,
- FH_TSSR_TX_ERROR_REG
- };
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (buf) {
- int pos = 0;
- size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
-
- *buf = kmalloc(bufsz, GFP_KERNEL);
- if (!*buf)
- return -ENOMEM;
-
- pos += scnprintf(*buf + pos, bufsz - pos,
- "FH register values:\n");
-
- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
- pos += scnprintf(*buf + pos, bufsz - pos,
- " %34s: 0X%08x\n",
- get_fh_string(fh_tbl[i]),
- iwl_read_direct32(trans, fh_tbl[i]));
-
- return pos;
- }
-#endif
-
- IWL_ERR(trans, "FH register values:\n");
- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
- IWL_ERR(trans, " %34s: 0X%08x\n",
- get_fh_string(fh_tbl[i]),
- iwl_read_direct32(trans, fh_tbl[i]));
-
- return 0;
-}
-
static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
@@ -1124,6 +1389,7 @@ static const char *get_csr_string(int cmd)
IWL_CMD(CSR_GIO_CHICKEN_BITS);
IWL_CMD(CSR_ANA_PLL_CFG);
IWL_CMD(CSR_HW_REV_WA_REG);
+ IWL_CMD(CSR_MONITOR_STATUS_REG);
IWL_CMD(CSR_DBG_HPET_MEM_REG);
default:
return "UNKNOWN";
@@ -1156,6 +1422,7 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans)
CSR_DRAM_INT_TBL_REG,
CSR_GIO_CHICKEN_BITS,
CSR_ANA_PLL_CFG,
+ CSR_MONITOR_STATUS_REG,
CSR_HW_REV_WA_REG,
CSR_DBG_HPET_MEM_REG
};
@@ -1178,18 +1445,7 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans)
} while (0)
/* file operation */
-#define DEBUGFS_READ_FUNC(name) \
-static ssize_t iwl_dbgfs_##name##_read(struct file *file, \
- char __user *user_buf, \
- size_t count, loff_t *ppos);
-
-#define DEBUGFS_WRITE_FUNC(name) \
-static ssize_t iwl_dbgfs_##name##_write(struct file *file, \
- const char __user *user_buf, \
- size_t count, loff_t *ppos);
-
#define DEBUGFS_READ_FILE_OPS(name) \
- DEBUGFS_READ_FUNC(name); \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.read = iwl_dbgfs_##name##_read, \
.open = simple_open, \
@@ -1197,7 +1453,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
};
#define DEBUGFS_WRITE_FILE_OPS(name) \
- DEBUGFS_WRITE_FUNC(name); \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.write = iwl_dbgfs_##name##_write, \
.open = simple_open, \
@@ -1205,8 +1460,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
};
#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
- DEBUGFS_READ_FUNC(name); \
- DEBUGFS_WRITE_FUNC(name); \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.write = iwl_dbgfs_##name##_write, \
.read = iwl_dbgfs_##name##_read, \
@@ -1387,16 +1640,15 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
{
struct iwl_trans *trans = file->private_data;
char *buf = NULL;
- int pos = 0;
- ssize_t ret = -EFAULT;
-
- ret = pos = iwl_pcie_dump_fh(trans, &buf);
- if (buf) {
- ret = simple_read_from_buffer(user_buf,
- count, ppos, buf, pos);
- kfree(buf);
- }
+ ssize_t ret;
+ ret = iwl_dump_fh(trans, &buf);
+ if (ret < 0)
+ return ret;
+ if (!buf)
+ return -EINVAL;
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+ kfree(buf);
return ret;
}
@@ -1424,6 +1676,61 @@ err:
IWL_ERR(trans, "failed to create the trans debugfs entry\n");
return -ENOMEM;
}
+
+static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
+{
+ u32 cmdlen = 0;
+ int i;
+
+ for (i = 0; i < IWL_NUM_OF_TBS; i++)
+ cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);
+
+ return cmdlen;
+}
+
+static u32 iwl_trans_pcie_dump_data(struct iwl_trans *trans,
+ void *buf, u32 buflen)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_fw_error_dump_data *data;
+ struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_fw_error_dump_txcmd *txcmd;
+ u32 len;
+ int i, ptr;
+
+ if (!buf)
+ return sizeof(*data) +
+ cmdq->q.n_window * (sizeof(*txcmd) +
+ TFD_MAX_PAYLOAD_SIZE);
+
+ len = 0;
+ data = buf;
+ data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
+ txcmd = (void *)data->data;
+ spin_lock_bh(&cmdq->lock);
+ ptr = cmdq->q.write_ptr;
+ for (i = 0; i < cmdq->q.n_window; i++) {
+ u8 idx = get_cmd_index(&cmdq->q, ptr);
+ u32 caplen, cmdlen;
+
+ cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]);
+ caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
+
+ if (cmdlen) {
+ len += sizeof(*txcmd) + caplen;
+ txcmd->cmdlen = cpu_to_le32(cmdlen);
+ txcmd->caplen = cpu_to_le32(caplen);
+ memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
+ txcmd = (void *)((u8 *)txcmd->data + caplen);
+ }
+
+ ptr = iwl_queue_dec_wrap(ptr);
+ }
+ spin_unlock_bh(&cmdq->lock);
+
+ data->len = cpu_to_le32(len);
+ return sizeof(*data) + len;
+}
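
The NULL-buffer branch above makes this the usual two-pass pattern: call once to learn a worst-case size, allocate, then call again to fill. An illustrative caller (kernel context assumed; the allocation and error handling are a sketch, not taken from this patch):

u32 len = iwl_trans_pcie_dump_data(trans, NULL, 0);        /* pass 1: size */
void *dump = kzalloc(len, GFP_KERNEL);

if (dump)
        len = iwl_trans_pcie_dump_data(trans, dump, len);  /* pass 2: fill */
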
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
struct dentry *dir)
@@ -1434,7 +1741,7 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
static const struct iwl_trans_ops trans_ops_pcie = {
.start_hw = iwl_trans_pcie_start_hw,
- .stop_hw = iwl_trans_pcie_stop_hw,
+ .op_mode_leave = iwl_trans_pcie_op_mode_leave,
.fw_alive = iwl_trans_pcie_fw_alive,
.start_fw = iwl_trans_pcie_start_fw,
.stop_device = iwl_trans_pcie_stop_device,
@@ -1454,10 +1761,6 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
-#ifdef CONFIG_PM_SLEEP
- .suspend = iwl_trans_pcie_suspend,
- .resume = iwl_trans_pcie_resume,
-#endif
.write8 = iwl_trans_pcie_write8,
.write32 = iwl_trans_pcie_write32,
.read32 = iwl_trans_pcie_read32,
@@ -1470,6 +1773,10 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.grab_nic_access = iwl_trans_pcie_grab_nic_access,
.release_nic_access = iwl_trans_pcie_release_nic_access,
.set_bits_mask = iwl_trans_pcie_set_bits_mask,
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ .dump_data = iwl_trans_pcie_dump_data,
+#endif
};
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
@@ -1483,9 +1790,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans = kzalloc(sizeof(struct iwl_trans) +
sizeof(struct iwl_trans_pcie), GFP_KERNEL);
-
- if (!trans)
- return NULL;
+ if (!trans) {
+ err = -ENOMEM;
+ goto out;
+ }
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1497,14 +1805,19 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
spin_lock_init(&trans_pcie->reg_lock);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
- /* W/A - seems to solve weird behavior. We need to remove this if we
- * don't want to stay in L1 all the time. This wastes a lot of power */
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
- PCIE_LINK_STATE_CLKPM);
-
- if (pci_enable_device(pdev)) {
- err = -ENODEV;
+ err = pci_enable_device(pdev);
+ if (err)
goto out_no_pci;
+
+ if (!cfg->base_params->pcie_l1_allowed) {
+ /*
+ * W/A - seems to solve weird behavior. We need to remove this
+ * if we don't want to stay in L1 all the time. This wastes a
+ * lot of power.
+ */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
+ PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
}
pci_set_master(pdev);
@@ -1541,6 +1854,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
* PCI Tx retries from interfering with C3 CPU state */
pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+ trans->dev = &pdev->dev;
+ trans_pcie->pci_dev = pdev;
+ iwl_disable_interrupts(trans);
+
err = pci_enable_msi(pdev);
if (err) {
dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
@@ -1552,8 +1869,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
}
- trans->dev = &pdev->dev;
- trans_pcie->pci_dev = pdev;
trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
@@ -1574,21 +1889,24 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
SLAB_HWCACHE_ALIGN,
NULL);
- if (!trans->dev_cmd_pool)
+ if (!trans->dev_cmd_pool) {
+ err = -ENOMEM;
goto out_pci_disable_msi;
-
- trans_pcie->inta_mask = CSR_INI_SET_MASK;
+ }
if (iwl_pcie_alloc_ict(trans))
goto out_free_cmd_pool;
- if (request_threaded_irq(pdev->irq, iwl_pcie_isr_ict,
- iwl_pcie_irq_handler,
- IRQF_SHARED, DRV_NAME, trans)) {
+ err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
+ iwl_pcie_irq_handler,
+ IRQF_SHARED, DRV_NAME, trans);
+ if (err) {
IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
goto out_free_ict;
}
+ trans_pcie->inta_mask = CSR_INI_SET_MASK;
+
return trans;
out_free_ict:
@@ -1603,5 +1921,6 @@ out_pci_disable_device:
pci_disable_device(pdev);
out_no_pci:
kfree(trans);
- return NULL;
+out:
+ return ERR_PTR(err);
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index c47c92165ab..038940afbdc 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -65,34 +65,40 @@
***************************************************/
static int iwl_queue_space(const struct iwl_queue *q)
{
- int s = q->read_ptr - q->write_ptr;
-
- if (q->read_ptr > q->write_ptr)
- s -= q->n_bd;
-
- if (s <= 0)
- s += q->n_window;
- /* keep some reserve to not confuse empty and full situations */
- s -= 2;
- if (s < 0)
- s = 0;
- return s;
+ unsigned int max;
+ unsigned int used;
+
+ /*
+ * To avoid ambiguity between empty and completely full queues, there
+ * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
+ * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
+ * to reserve any queue entries for this purpose.
+ */
+ if (q->n_window < TFD_QUEUE_SIZE_MAX)
+ max = q->n_window;
+ else
+ max = TFD_QUEUE_SIZE_MAX - 1;
+
+ /*
+ * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
+ * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
+ */
+ used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);
+
+ if (WARN_ON(used > max))
+ return 0;
+
+ return max - used;
}
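
A worked example makes the mask arithmetic concrete: with TFD_QUEUE_SIZE_MAX = 256, write_ptr = 3 and read_ptr = 250 give used = (3 - 250) & 255 = 9, exactly the number of in-flight entries across the wrap. A standalone check:

#include <stdio.h>

#define TFD_QUEUE_SIZE_MAX 256 /* power of two, as the comment above requires */

int main(void)
{
        unsigned int write_ptr = 3, read_ptr = 250;   /* wrapped-around queue */
        unsigned int used = (write_ptr - read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);

        printf("used = %u\n", used); /* 9: unsigned wrap + mask handle it */
        return 0;
}
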
/*
* iwl_queue_init - Initialize queue's high/low-water and read/write indexes
*/
-static int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
+static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id)
{
- q->n_bd = count;
q->n_window = slots_num;
q->id = id;
- /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
- * and iwl_queue_dec_wrap are broken. */
- if (WARN_ON(!is_power_of_2(count)))
- return -EINVAL;
-
/* slots_num must be power-of-two size, otherwise
* get_cmd_index is broken. */
if (WARN_ON(!is_power_of_2(slots_num)))
@@ -185,17 +191,17 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
IWL_ERR(trans,
"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
i, active ? "" : "in", fifo, tbl_dw,
- iwl_read_prph(trans,
- SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
+ iwl_read_prph(trans, SCD_QUEUE_RDPTR(i)) &
+ (TFD_QUEUE_SIZE_MAX - 1),
iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
}
for (i = q->read_ptr; i != q->write_ptr;
- i = iwl_queue_inc_wrap(i, q->n_bd))
+ i = iwl_queue_inc_wrap(i))
IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
le32_to_cpu(txq->scratchbufs[i].scratch));
- iwl_op_mode_nic_error(trans->op_mode);
+ iwl_force_nmi(trans);
}
/*
@@ -275,53 +281,64 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
/*
* iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
*/
-void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
+static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
+ struct iwl_txq *txq)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 reg = 0;
int txq_id = txq->q.id;
- if (txq->need_update == 0)
- return;
+ lockdep_assert_held(&txq->lock);
- if (trans->cfg->base_params->shadow_reg_enable) {
- /* shadow register enabled */
- iwl_write32(trans, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (txq_id << 8));
- } else {
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
- /* if we're trying to save power */
- if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
- /* wake up nic if it's powered down ...
- * uCode will wake up, and interrupt us again, so next
- * time we'll skip this part. */
- reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
-
- if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
- IWL_DEBUG_INFO(trans,
- "Tx queue %d requesting wakeup,"
- " GP1 = 0x%x\n", txq_id, reg);
- iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- return;
- }
+ /*
+ * explicitly wake up the NIC if:
+ * 1. shadow registers aren't enabled
+ * 2. the NIC is woken up for CMD outside this function, regardless
+ *    of the shadow registers
+ * 3. there is a chance that the NIC is asleep
+ */
+ if (!trans->cfg->base_params->shadow_reg_enable &&
+ txq_id != trans_pcie->cmd_queue &&
+ test_bit(STATUS_TPOWER_PMI, &trans->status)) {
+ /*
+ * wake up nic if it's powered down ...
+ * uCode will wake up, and interrupt us again, so next
+ * time we'll skip this part.
+ */
+ reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
+
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
+ txq_id, reg);
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ txq->need_update = true;
+ return;
+ }
+ }
- IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id,
- txq->q.write_ptr);
+ /*
+ * if not in power-save mode, uCode will never sleep when we're
+ * trying to tx (during RFKILL, we're not trying to tx).
+ */
+ IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
+ iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
+}
+
+void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i;
- iwl_write_direct32(trans, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (txq_id << 8));
+ for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+ struct iwl_txq *txq = &trans_pcie->txq[i];
- /*
- * else not in power-save mode,
- * uCode will never sleep when we're
- * trying to tx (during RFKILL, we're not trying to tx).
- */
- } else
- iwl_write32(trans, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (txq_id << 8));
+ spin_lock_bh(&txq->lock);
+ if (trans_pcie->txq[i].need_update) {
+ iwl_pcie_txq_inc_wr_ptr(trans, txq);
+ trans_pcie->txq[i].need_update = false;
+ }
+ spin_unlock_bh(&txq->lock);
}
- txq->need_update = 0;
}
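
The net effect of this hunk is to split the write-pointer update in two: the hot path only marks need_update when the NIC may be asleep, and iwl_pcie_txq_check_wrptrs() later flushes every marked queue under its lock. A minimal standalone model of that deferred-flush shape, with the locks and the HBUS_TARG_WRPTR write stubbed out:

#include <stdbool.h>
#include <stdio.h>

#define NUM_QUEUES 4

static struct txq_model {
        unsigned int write_ptr;
        bool need_update;      /* set when the HW write had to be deferred */
} queues[NUM_QUEUES];

static void check_wrptrs(void)
{
        for (int i = 0; i < NUM_QUEUES; i++) {
                if (!queues[i].need_update)
                        continue;
                /* stand-in for the HBUS_TARG_WRPTR write */
                printf("queue %d -> wr_ptr %u\n", i, queues[i].write_ptr);
                queues[i].need_update = false;
        }
}

int main(void)
{
        queues[2].write_ptr = 7;
        queues[2].need_update = true;   /* NIC was asleep at tx time */
        check_wrptrs();                 /* flushes only the marked queue */
        return 0;
}
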
static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
@@ -336,13 +353,6 @@ static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
return addr;
}
-static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
- return le16_to_cpu(tb->hi_n_len) >> 4;
-}
-
static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
dma_addr_t addr, u16 len)
{
@@ -402,13 +412,17 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
struct iwl_tfd *tfd_tmp = txq->tfds;
- /* rd_ptr is bounded by n_bd and idx is bounded by n_window */
+ /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
+ * idx is bounded by n_window
+ */
int rd_ptr = txq->q.read_ptr;
int idx = get_cmd_index(&txq->q, rd_ptr);
lockdep_assert_held(&txq->lock);
- /* We have only q->n_window txq->entries, but we use q->n_bd tfds */
+ /* We have only q->n_window txq->entries, but we use
+ * TFD_QUEUE_SIZE_MAX tfds
+ */
iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);
/* free SKB */
@@ -429,7 +443,7 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
}
static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
- dma_addr_t addr, u16 len, u8 reset)
+ dma_addr_t addr, u16 len, bool reset)
{
struct iwl_queue *q;
struct iwl_tfd *tfd, *tfd_tmp;
@@ -451,13 +465,10 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
return -EINVAL;
}
- if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
+ if (WARN(addr & ~IWL_TX_DMA_MASK,
+ "Unaligned address = %llx\n", (unsigned long long)addr))
return -EINVAL;
- if (unlikely(addr & ~IWL_TX_DMA_MASK))
- IWL_ERR(trans, "Unaligned address = %llx\n",
- (unsigned long long)addr);
-
iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
return 0;
@@ -538,15 +549,14 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
{
int ret;
- txq->need_update = 0;
+ txq->need_update = false;
/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
* iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
/* Initialize queue's high/low-water marks, and head/tail indexes */
- ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
- txq_id);
+ ret = iwl_queue_init(&txq->q, slots_num, txq_id);
if (ret)
return ret;
@@ -571,15 +581,12 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
struct iwl_queue *q = &txq->q;
- if (!q->n_bd)
- return;
-
spin_lock_bh(&txq->lock);
while (q->write_ptr != q->read_ptr) {
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
txq_id, q->read_ptr);
iwl_pcie_txq_free_tfd(trans, txq);
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
}
txq->active = false;
spin_unlock_bh(&txq->lock);
@@ -616,10 +623,12 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
}
/* De-alloc circular buffer of TFDs */
- if (txq->q.n_bd) {
- dma_free_coherent(dev, sizeof(struct iwl_tfd) *
- txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+ if (txq->tfds) {
+ dma_free_coherent(dev,
+ sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX,
+ txq->tfds, txq->q.dma_addr);
txq->q.dma_addr = 0;
+ txq->tfds = NULL;
dma_free_coherent(dev,
sizeof(*txq->scratchbufs) * txq->q.n_window,
@@ -676,7 +685,8 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
/* The chain extension of the SCD doesn't work well. This feature is
* enabled by default by the HW, so we need to disable it manually.
*/
- iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
+ if (trans->cfg->base_params->scd_chain_ext_wa)
+ iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
trans_pcie->cmd_fifo);
@@ -696,8 +706,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
/* Enable L1-Active */
- iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
- APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
@@ -730,10 +741,9 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ch, txq_id, ret;
- unsigned long flags;
/* Turn off all Tx DMA fifos */
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_pcie_txq_set_sched(trans, 0);
@@ -750,13 +760,19 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)
iwl_read_direct32(trans,
FH_TSSR_TX_STATUS_REG));
}
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
- if (!trans_pcie->txq) {
- IWL_WARN(trans,
- "Stopping tx queues that aren't allocated...\n");
+ /*
+ * This function can be called before the op_mode has disabled the
+ * queues. This happens when we get an rfkill interrupt.
+ * Since we stop Tx altogether, mark the queues as stopped.
+ */
+ memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+ memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+ /* This can happen: start_hw, stop_device */
+ if (!trans_pcie->txq)
return 0;
- }
/* Unmap DMA from host system and free skb's */
for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
@@ -829,7 +845,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
sizeof(struct iwl_txq), GFP_KERNEL);
if (!trans_pcie->txq) {
IWL_ERR(trans, "Not enough memory for txq\n");
- ret = ENOMEM;
+ ret = -ENOMEM;
goto error;
}
@@ -858,7 +874,6 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
int txq_id, slots_num;
- unsigned long flags;
bool alloc = false;
if (!trans_pcie->txq) {
@@ -868,7 +883,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
alloc = true;
}
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
/* Turn off all Tx DMA fifos */
iwl_write_prph(trans, SCD_TXFACT, 0);
@@ -877,7 +892,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
trans_pcie->kw.dma >> 4);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
@@ -922,8 +937,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
- /* n_bd is usually 256 => n_bd - 1 = 0xff */
- int tfd_num = ssn & (txq->q.n_bd - 1);
+ int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
struct iwl_queue *q = &txq->q;
int last_to_free;
@@ -947,12 +961,12 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
/* Since we free until index _not_ inclusive, the one before index is
* the last we will free. This one must be used */
- last_to_free = iwl_queue_dec_wrap(tfd_num, q->n_bd);
+ last_to_free = iwl_queue_dec_wrap(tfd_num);
if (!iwl_queue_used(q, last_to_free)) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
- __func__, txq_id, last_to_free, q->n_bd,
+ __func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
q->write_ptr, q->read_ptr);
goto out;
}
@@ -962,7 +976,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
for (;
q->read_ptr != tfd_num;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
continue;
@@ -996,28 +1010,40 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
struct iwl_queue *q = &txq->q;
+ unsigned long flags;
int nfreed = 0;
lockdep_assert_held(&txq->lock);
- if ((idx >= q->n_bd) || (!iwl_queue_used(q, idx))) {
+ if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
- __func__, txq_id, idx, q->n_bd,
+ __func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
q->write_ptr, q->read_ptr);
return;
}
- for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+ for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx;
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
if (nfreed++ > 0) {
IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
idx, q->write_ptr, q->read_ptr);
- iwl_op_mode_nic_error(trans->op_mode);
+ iwl_force_nmi(trans);
}
}
+ if (trans->cfg->base_params->apmg_wake_up_wa &&
+ q->read_ptr == q->write_ptr) {
+ spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+ WARN_ON(!trans_pcie->cmd_in_flight);
+ trans_pcie->cmd_in_flight = false;
+ __iwl_trans_pcie_clear_bit(trans,
+ CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+ }
+
iwl_pcie_txq_progress(trans_pcie, txq);
}
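
This is the release half of the apmg_wake_up_wa handshake: the enqueue path (further down in this patch) sets cmd_in_flight and asserts MAC_ACCESS_REQ before the first host command, and reclaim drops both only once read_ptr has caught up with write_ptr, i.e. when no command is left in flight. A compact standalone model of the pairing, with the CSR access stubbed out:

#include <stdbool.h>
#include <stdio.h>

static bool cmd_in_flight;

static void mac_access(bool req)    /* stand-in for the CSR_GP_CNTRL bit */
{
        printf("MAC_ACCESS_REQ %s\n", req ? "set" : "cleared");
}

static void enqueue_hcmd(void)
{
        if (!cmd_in_flight) {       /* first command: keep the NIC awake */
                cmd_in_flight = true;
                mac_access(true);
        }
}

static void reclaim_hcmd(unsigned int read_ptr, unsigned int write_ptr)
{
        if (cmd_in_flight && read_ptr == write_ptr) {   /* queue drained */
                cmd_in_flight = false;
                mac_access(false);  /* let the NIC sleep again */
        }
}

int main(void)
{
        enqueue_hcmd();
        enqueue_hcmd();         /* second command: no extra wake request */
        reclaim_hcmd(2, 2);     /* all commands reclaimed: release */
        return 0;
}
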
@@ -1093,6 +1119,8 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
* non-AGG queue.
*/
iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
+
+ ssn = trans_pcie->txq[txq_id].q.read_ptr;
}
/* Place first TFD at index corresponding to start sequence number.
@@ -1132,8 +1160,15 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
SCD_TX_STTS_QUEUE_OFFSET(txq_id);
static const u32 zero_val[4] = {};
+ /*
+ * Upon HW rfkill, we stop the device and then stop the queues
+ * in the op_mode. For the sake of op_mode simplicity, allow the
+ * op_mode to call txq_disable after it has already called
+ * stop_device.
+ */
if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
- WARN_ONCE(1, "queue %d not used", txq_id);
+ WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
+ "queue %d not used", txq_id);
return;
}
@@ -1153,10 +1188,10 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
/*
* iwl_pcie_enqueue_hcmd - enqueue a uCode command
* @priv: device private data pointer
- * @cmd: a point to the ucode command structure
+ * @cmd: a pointer to the ucode command structure
*
- * The function returns < 0 values to indicate the operation is
- * failed. On success, it turns the index (> 0) of command in the
+ * The function returns < 0 values to indicate the operation
+ * failed. On success, it returns the index (>= 0) of the command in the
* command queue.
*/
static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
@@ -1167,12 +1202,13 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_queue *q = &txq->q;
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
+ unsigned long flags;
void *dup_buf = NULL;
dma_addr_t phys_addr;
int idx;
u16 copy_size, cmd_size, scratch_size;
bool had_nocopy = false;
- int i;
+ int i, ret;
u32 cmd_pos;
const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
@@ -1279,28 +1315,39 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
cmd_pos = offsetof(struct iwl_device_cmd, payload);
copy_size = sizeof(out_cmd->hdr);
for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
- int copy = 0;
+ int copy;
if (!cmd->len[i])
continue;
- /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
- if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
- copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
-
- if (copy > cmd->len[i])
- copy = cmd->len[i];
- }
-
/* copy everything if not nocopy/dup */
if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
- IWL_HCMD_DFL_DUP)))
+ IWL_HCMD_DFL_DUP))) {
copy = cmd->len[i];
- if (copy) {
memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
cmd_pos += copy;
copy_size += copy;
+ continue;
+ }
+
+ /*
+ * Otherwise we need at least IWL_HCMD_SCRATCHBUF_SIZE copied
+ * in total (for the scratchbuf handling), but copy up to what
+ * we can fit into the payload for debug dump purposes.
+ */
+ copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
+
+ memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
+ cmd_pos += copy;
+
+ /* However, treat copy_size the proper way, we need it below */
+ if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
+ copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
+
+ if (copy > cmd->len[i])
+ copy = cmd->len[i];
+ copy_size += copy;
}
}
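
The subtlety in this rewrite is that two different byte counts advance at different rates: cmd_pos grows by as much of a nocopy/dup fragment as fits in the payload (so debug dumps capture it), while copy_size only grows up to IWL_HCMD_SCRATCHBUF_SIZE, since that is all the scratch-buffer mapping below consumes. A worked standalone example - the sizes are placeholders, not the real IWL_* constants:

#include <stdio.h>

#define SCRATCHBUF_SIZE 16      /* placeholder for IWL_HCMD_SCRATCHBUF_SIZE */
#define MAX_PAYLOAD     128     /* placeholder for TFD_MAX_PAYLOAD_SIZE */

int main(void)
{
        int cmd_pos = 8, copy_size = 8;         /* header already copied */
        int frag_len = 100;                     /* one nocopy fragment */

        /* payload copy: as much as fits, for debug-dump purposes */
        int copy = frag_len < MAX_PAYLOAD - cmd_pos ?
                   frag_len : MAX_PAYLOAD - cmd_pos;
        cmd_pos += copy;

        /* scratch accounting: only up to SCRATCHBUF_SIZE matters */
        if (copy_size < SCRATCHBUF_SIZE) {
                copy = SCRATCHBUF_SIZE - copy_size;
                if (copy > frag_len)
                        copy = frag_len;
                copy_size += copy;
        }

        printf("cmd_pos=%d copy_size=%d\n", cmd_pos, copy_size); /* 108, 16 */
        return 0;
}
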
@@ -1315,7 +1362,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
iwl_pcie_txq_build_tfd(trans, txq,
iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
- scratch_size, 1);
+ scratch_size, true);
/* map first command fragment, if any remains */
if (copy_size > scratch_size) {
@@ -1331,7 +1378,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
}
iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
- copy_size - scratch_size, 0);
+ copy_size - scratch_size, false);
}
/* map the remaining (adjusted) nocopy/dup fragments */
@@ -1354,7 +1401,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
goto out;
}
- iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], 0);
+ iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
}
out_meta->flags = cmd->flags;
@@ -1362,18 +1409,46 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
kfree(txq->entries[idx].free_buf);
txq->entries[idx].free_buf = dup_buf;
- txq->need_update = 1;
-
trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
/* start timer if queue currently empty */
if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
+ spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+
+ /*
+ * wake up the NIC to make sure that the firmware will see the host
+ * command - we will let the NIC sleep once all the host commands
+ * have returned. This needs to be done only on NICs that have
+ * apmg_wake_up_wa set.
+ */
+ if (trans->cfg->base_params->apmg_wake_up_wa &&
+ !trans_pcie->cmd_in_flight) {
+ trans_pcie->cmd_in_flight = true;
+ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+ CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
+ 15000);
+ if (ret < 0) {
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+ trans_pcie->cmd_in_flight = false;
+ idx = -EIO;
+ goto out;
+ }
+ }
+
/* Increment and update queue's write index */
- q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
+ q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
iwl_pcie_txq_inc_wr_ptr(trans, txq);
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+
out:
spin_unlock_bh(&txq->lock);
free_dup_buf:
@@ -1438,12 +1513,12 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
iwl_pcie_cmdq_reclaim(trans, txq_id, index);
if (!(meta->flags & CMD_ASYNC)) {
- if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
+ if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
IWL_WARN(trans,
"HCMD_ACTIVE already clear for command %s\n",
get_cmd_string(trans_pcie, cmd->hdr.cmd));
}
- clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
get_cmd_string(trans_pcie, cmd->hdr.cmd));
wake_up(&trans_pcie->wait_command_queue);
@@ -1454,7 +1529,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
spin_unlock_bh(&txq->lock);
}
-#define HOST_COMPLETE_TIMEOUT (2 * HZ)
+#define HOST_COMPLETE_TIMEOUT (2 * HZ)
static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
@@ -1486,12 +1561,11 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
get_cmd_string(trans_pcie, cmd->id));
- if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
- &trans_pcie->status))) {
- IWL_ERR(trans, "Command %s: a command is already active!\n",
- get_cmd_string(trans_pcie, cmd->id));
+ if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status),
+ "Command %s: a command is already active!\n",
+ get_cmd_string(trans_pcie, cmd->id)))
return -EIO;
- }
IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
get_cmd_string(trans_pcie, cmd->id));
@@ -1499,7 +1573,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
if (cmd_idx < 0) {
ret = cmd_idx;
- clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
IWL_ERR(trans,
"Error sending %s: enqueue_hcmd failed: %d\n",
get_cmd_string(trans_pcie, cmd->id), ret);
@@ -1507,34 +1581,32 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
}
ret = wait_event_timeout(trans_pcie->wait_command_queue,
- !test_bit(STATUS_HCMD_ACTIVE,
- &trans_pcie->status),
+ !test_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status),
HOST_COMPLETE_TIMEOUT);
if (!ret) {
- if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
- struct iwl_txq *txq =
- &trans_pcie->txq[trans_pcie->cmd_queue];
- struct iwl_queue *q = &txq->q;
+ struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_queue *q = &txq->q;
- IWL_ERR(trans,
- "Error sending %s: time out after %dms.\n",
- get_cmd_string(trans_pcie, cmd->id),
- jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+ IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
+ get_cmd_string(trans_pcie, cmd->id),
+ jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
- IWL_ERR(trans,
- "Current CMD queue read_ptr %d write_ptr %d\n",
- q->read_ptr, q->write_ptr);
-
- clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
- IWL_DEBUG_INFO(trans,
- "Clearing HCMD_ACTIVE for command %s\n",
- get_cmd_string(trans_pcie, cmd->id));
- ret = -ETIMEDOUT;
- goto cancel;
- }
+ IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
+ q->read_ptr, q->write_ptr);
+
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
+ get_cmd_string(trans_pcie, cmd->id));
+ ret = -ETIMEDOUT;
+
+ iwl_force_nmi(trans);
+ iwl_trans_fw_error(trans);
+
+ goto cancel;
}
- if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) {
+ if (test_bit(STATUS_FW_ERROR, &trans->status)) {
IWL_ERR(trans, "FW error in SYNC CMD %s\n",
get_cmd_string(trans_pcie, cmd->id));
dump_stack();
@@ -1543,7 +1615,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
}
if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
- test_bit(STATUS_RFKILL, &trans_pcie->status)) {
+ test_bit(STATUS_RFKILL, &trans->status)) {
IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
ret = -ERFKILL;
goto cancel;
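The synchronous send is a set-flag / wait / clear-flag handshake: the caller sets STATUS_SYNC_HCMD_ACTIVE, sleeps on wait_command_queue, and iwl_pcie_hcmd_complete() clears the bit and wakes it up; on timeout the caller clears the bit itself and escalates via iwl_force_nmi(). A runnable user-space analogue of that handshake, with pthreads standing in for wait_event_timeout()/wake_up() (all names here are illustrative, not driver API):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done = PTHREAD_COND_INITIALIZER;
static bool cmd_active;

/* Submitter side: mark the command active, wait for completion. */
int send_sync(int timeout_sec)
{
        struct timespec ts;
        int ret;

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += timeout_sec;

        pthread_mutex_lock(&lock);
        cmd_active = true;
        /* ... enqueue the command here ... */
        while (cmd_active) {
                if (pthread_cond_timedwait(&done, &lock, &ts) == ETIMEDOUT)
                        break;
        }
        ret = cmd_active ? -ETIMEDOUT : 0;
        cmd_active = false;     /* mirror the clear-on-timeout above */
        pthread_mutex_unlock(&lock);

        return ret;
}

/* Completion side: the analogue of iwl_pcie_hcmd_complete(). */
void complete_cmd(void)
{
        pthread_mutex_lock(&lock);
        cmd_active = false;
        pthread_cond_signal(&done);
        pthread_mutex_unlock(&lock);
}

The kernel version gets the same semantics from wait_event_timeout(), which re-checks the condition to absorb spurious wake-ups, just as the while loop does here.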
@@ -1580,13 +1652,8 @@ cancel:
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (test_bit(STATUS_FW_ERROR, &trans_pcie->status))
- return -EIO;
-
if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
- test_bit(STATUS_RFKILL, &trans_pcie->status)) {
+ test_bit(STATUS_RFKILL, &trans->status)) {
IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
cmd->id);
return -ERFKILL;
@@ -1611,7 +1678,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
dma_addr_t tb0_phys, tb1_phys, scratch_phys;
void *tb1_addr;
u16 len, tb1_len, tb2_len;
- u8 wait_write_ptr = 0;
+ bool wait_write_ptr;
__le16 fc = hdr->frame_control;
u8 hdr_len = ieee80211_hdrlen(fc);
u16 wifi_seq;
@@ -1619,10 +1686,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
txq = &trans_pcie->txq[txq_id];
q = &txq->q;
- if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
- WARN_ON_ONCE(1);
+ if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
+ "TX on unused queue %d\n", txq_id))
return -EINVAL;
- }
spin_lock(&txq->lock);
@@ -1632,7 +1698,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
* Check here that the packets are in the right place on the ring.
*/
wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
- WARN_ONCE(trans_pcie->txq[txq_id].ampdu &&
+ WARN_ONCE(txq->ampdu &&
(wifi_seq & 0xff) != q->write_ptr,
"Q: %d WiFi Seq %d tfdNum %d",
txq_id, wifi_seq, q->write_ptr);
@@ -1641,7 +1707,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
txq->entries[q->write_ptr].skb = skb;
txq->entries[q->write_ptr].cmd = dev_cmd;
- dev_cmd->hdr.cmd = REPLY_TX;
dev_cmd->hdr.sequence =
cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
INDEX_TO_SEQ(q->write_ptr)));
@@ -1664,7 +1729,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
*/
len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
- tb1_len = (len + 3) & ~3;
+ tb1_len = ALIGN(len, 4);
/* Tell NIC about any 2-byte padding after MAC header */
if (tb1_len != len)
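The (len + 3) & ~3 expression and ALIGN(len, 4) compute the same power-of-two round-up; the macro only makes the intent explicit. A quick self-contained check, with the kernel's ALIGN expanded by hand since linux/kernel.h is not available in user space:

#include <assert.h>
#include <stdio.h>

/* The kernel's round-up for a power-of-two alignment, expanded by hand. */
#define ALIGN_POW2(x, a)        (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        for (unsigned int len = 0; len < 64; len++)
                assert(ALIGN_POW2(len, 4) == ((len + 3) & ~3u));

        printf("ALIGN(len, 4) matches (len + 3) & ~3\n");
        return 0;
}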
@@ -1674,7 +1739,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
IWL_HCMD_SCRATCHBUF_SIZE);
iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
- IWL_HCMD_SCRATCHBUF_SIZE, 1);
+ IWL_HCMD_SCRATCHBUF_SIZE, true);
/* there must be data left over for TB1 or this code must be changed */
BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE);
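BUILD_BUG_ON() turns the "there must be data left over for TB1" requirement into a compile-time failure rather than a runtime surprise. In C11 user space the same guard can be written with _Static_assert; the sizes below are illustrative stand-ins, not the driver's real values:

/* Illustrative stand-ins for the two sizes compared above. */
struct tx_cmd_sketch { char payload[320]; };
#define SCRATCHBUF_SIZE 20

/* Fails to compile if the command struct cannot cover the scratch area,
 * i.e. the same condition BUILD_BUG_ON() checks above. */
_Static_assert(sizeof(struct tx_cmd_sketch) >= SCRATCHBUF_SIZE,
               "tx cmd must be at least as large as the scratch buffer");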
@@ -1684,7 +1749,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
goto out_err;
- iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, 0);
+ iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
/*
* Set up TFD's third entry to point directly to remainder
@@ -1700,7 +1765,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
&txq->tfds[q->write_ptr]);
goto out_err;
}
- iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, 0);
+ iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
}
/* Set up entry for this TFD in Tx byte-count array */
@@ -1714,12 +1779,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
trace_iwlwifi_dev_tx_data(trans->dev, skb,
skb->data + hdr_len, tb2_len);
- if (!ieee80211_has_morefrags(fc)) {
- txq->need_update = 1;
- } else {
- wait_write_ptr = 1;
- txq->need_update = 0;
- }
+ wait_write_ptr = ieee80211_has_morefrags(fc);
/* start timer if queue currently empty */
if (txq->need_update && q->read_ptr == q->write_ptr &&
@@ -1727,22 +1787,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
/* Tell device the write index *just past* this latest filled TFD */
- q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_pcie_txq_inc_wr_ptr(trans, txq);
+ q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
+ if (!wait_write_ptr)
+ iwl_pcie_txq_inc_wr_ptr(trans, txq);
/*
* At this point the frame is "transmitted" successfully
- * and we will get a TX status notification eventually,
- * regardless of the value of ret. "ret" only indicates
- * whether or not we should update the write pointer.
+ * and we will get a TX status notification eventually.
*/
if (iwl_queue_space(q) < q->high_mark) {
- if (wait_write_ptr) {
- txq->need_update = 1;
+ if (wait_write_ptr)
iwl_pcie_txq_inc_wr_ptr(trans, txq);
- } else {
+ else
iwl_stop_queue(trans, txq);
- }
}
spin_unlock(&txq->lock);
return 0;
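The iwl_queue_inc_wrap() calls in this patch lose their q->n_bd argument, which is consistent with the queue size becoming a single compile-time constant: with a power-of-two ring, wrap-around is a mask rather than a modulo. A sketch under that assumption (QUEUE_SIZE stands in for the driver's fixed TFD ring size):

#include <assert.h>

#define QUEUE_SIZE      256     /* must be a power of two for the mask */

/* Advance a ring index by one, wrapping at QUEUE_SIZE. */
static int queue_inc_wrap(int index)
{
        return (index + 1) & (QUEUE_SIZE - 1);
}

int main(void)
{
        assert(queue_inc_wrap(0) == 1);
        assert(queue_inc_wrap(QUEUE_SIZE - 1) == 0);    /* wraps to 0 */
        return 0;
}

The deferred iwl_pcie_txq_inc_wr_ptr() calls above are a separate micro-optimisation: when more fragments are coming (wait_write_ptr), the doorbell write is postponed so the NIC fetches a batch of TFDs instead of one per frame.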