path: root/drivers/net/ethernet/qlogic
Diffstat (limited to 'drivers/net/ethernet/qlogic')
-rw-r--r--  drivers/net/ethernet/qlogic/Kconfig                       |   42
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/Makefile               |    4
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic.h           |   29
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c       |    9
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c   |    4
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h       |    8
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c        |   30
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h        |    4
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c      |   42
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c      |  416
-rw-r--r--  drivers/net/ethernet/qlogic/qla3xxx.c                     |   17
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/Makefile               |    8
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h               | 1175
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c       | 4065
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h       |  661
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c     | 2448
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c     |  284
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c           | 1142
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c           | 1147
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h           |  122
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c       | 1157
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h           |  114
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c            |  540
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h            |  225
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c          |  142
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c            | 1263
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c          | 2474
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c      | 1074
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h         |  276
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c  | 2209
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c      | 2047
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c         |  736
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge.h                   |   61
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_dbg.c               |    8
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c           |    7
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c              |  274
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_mpi.c               |    2
37 files changed, 21549 insertions(+), 2717 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index a8669adecc9..d49cba12908 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -35,6 +35,48 @@ config QLCNIC
This driver supports QLogic QLE8240 and QLE8242 Converged Ethernet
devices.
+config QLCNIC_SRIOV
+ bool "QLOGIC QLCNIC 83XX family SR-IOV Support"
+ depends on QLCNIC && PCI_IOV
+ default y
+ ---help---
+ This configuration parameter enables Single Root Input Output
+ Virtualization support for QLE83XX Converged Ethernet devices.
+ This allows for virtual function acceleration in virtualized
+ environments.
+
+config QLCNIC_DCB
+ bool "QLOGIC QLCNIC 82XX and 83XX family DCB Support"
+ depends on QLCNIC && DCB
+ default y
+ ---help---
+ This configuration parameter enables DCB support in QLE83XX
+ and QLE82XX Converged Ethernet devices. This allows for DCB
+ get operations support through rtNetlink interface. Only CEE
+ mode of DCB is supported. PG and PFC values are related only
+ to Tx.
+
+config QLCNIC_VXLAN
+ bool "Virtual eXtensible Local Area Network (VXLAN) offload support"
+ default n
+ depends on QLCNIC && VXLAN && !(QLCNIC=y && VXLAN=m)
+ ---help---
+ This enables hardware offload support for VXLAN protocol over QLogic's
+ 84XX series adapters.
+ Say Y here if you want to enable hardware offload support for
+ Virtual eXtensible Local Area Network (VXLAN) in the driver.
+
+config QLCNIC_HWMON
+ bool "QLOGIC QLCNIC 82XX and 83XX family HWMON support"
+ depends on QLCNIC && HWMON && !(QLCNIC=y && HWMON=m)
+ default y
+ ---help---
+ This configuration parameter can be used to read the
+ board temperature in Converged Ethernet devices
+ supported by qlcnic.
+
+ This data is available via the hwmon sysfs interface.
+
config QLGE
tristate "QLogic QLGE 10Gb Ethernet Driver Support"
depends on PCI
diff --git a/drivers/net/ethernet/qlogic/netxen/Makefile b/drivers/net/ethernet/qlogic/netxen/Makefile
index 861a0590b1f..e14e60c8838 100644
--- a/drivers/net/ethernet/qlogic/netxen/Makefile
+++ b/drivers/net/ethernet/qlogic/netxen/Makefile
@@ -13,9 +13,7 @@
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston,
-# MA 02111-1307, USA.
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# The full GNU General Public License is included in this distribution
# in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index eb3dfdbb642..6e426ae9469 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
@@ -53,8 +51,8 @@
#define _NETXEN_NIC_LINUX_MAJOR 4
#define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 80
-#define NETXEN_NIC_LINUX_VERSIONID "4.0.80"
+#define _NETXEN_NIC_LINUX_SUBVERSION 82
+#define NETXEN_NIC_LINUX_VERSIONID "4.0.82"
#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
#define _major(v) (((v) >> 24) & 0xff)
@@ -955,9 +953,10 @@ typedef struct nx_mac_list_s {
uint8_t mac_addr[ETH_ALEN+2];
} nx_mac_list_t;
-struct nx_vlan_ip_list {
+struct nx_ip_list {
struct list_head list;
__be32 ip_addr;
+ bool master;
};
/*
@@ -1170,7 +1169,6 @@ typedef struct {
#define NETXEN_DB_MAPSIZE_BYTES 0x1000
-#define NETXEN_NETDEV_WEIGHT 128
#define NETXEN_ADAPTER_UP_MAGIC 777
#define NETXEN_NIC_PEG_TUNE 0
@@ -1605,7 +1603,7 @@ struct netxen_adapter {
struct net_device *netdev;
struct pci_dev *pdev;
struct list_head mac_list;
- struct list_head vlan_ip_list;
+ struct list_head ip_list;
spinlock_t tx_clean_lock;
@@ -1854,7 +1852,7 @@ static const struct netxen_brdinfo netxen_boards[] = {
#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(netxen_boards)
-static inline void get_brd_name_by_type(u32 type, char *name)
+static inline int netxen_nic_get_brd_name_by_type(u32 type, char *name)
{
int i, found = 0;
for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
@@ -1863,10 +1861,14 @@ static inline void get_brd_name_by_type(u32 type, char *name)
found = 1;
break;
}
+ }
+ if (!found) {
+ strcpy(name, "Unknown");
+ return -EINVAL;
}
- if (!found)
- name = "Unknown";
+
+ return 0;
}
static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring)
@@ -1879,9 +1881,8 @@ static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring)
int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac);
int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac);
-extern void netxen_change_ringparam(struct netxen_adapter *adapter);
-extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr,
- int *valp);
+void netxen_change_ringparam(struct netxen_adapter *adapter);
+int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp);
extern const struct ethtool_ops netxen_nic_ethtool_ops;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index 7f556a84925..6f6be57f469 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
@@ -201,11 +199,8 @@ netxen_setup_minidump(struct netxen_adapter *adapter)
adapter->mdump.md_template =
kmalloc(adapter->mdump.md_template_size, GFP_KERNEL);
- if (!adapter->mdump.md_template) {
- dev_err(&adapter->pdev->dev, "Unable to allocate memory "
- "for minidump template.\n");
+ if (!adapter->mdump.md_template)
return -ENOMEM;
- }
err = netxen_get_minidump_template(adapter);
if (err) {
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 4ca2c196c98..87e073c6e29 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
index 28e076960bc..a310c2f6502 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
@@ -734,6 +732,9 @@ enum {
#define NIC_CRB_BASE_2 (NETXEN_CAM_RAM(0x700))
#define NETXEN_NIC_REG(X) (NIC_CRB_BASE+(X))
#define NETXEN_NIC_REG_2(X) (NIC_CRB_BASE_2+(X))
+#define NETXEN_INTR_MODE_REG NETXEN_NIC_REG(0x44)
+#define NETXEN_MSI_MODE 0x1
+#define NETXEN_INTX_MODE 0x2
#define NX_CDRP_CRB_OFFSET (NETXEN_NIC_REG(0x18))
#define NX_ARG1_CRB_OFFSET (NETXEN_NIC_REG(0x1c))
@@ -955,6 +956,7 @@ enum {
#define NETXEN_PEG_HALT_STATUS2 (NETXEN_CAM_RAM(0xac))
#define NX_CRB_DEV_REF_COUNT (NETXEN_CAM_RAM(0x138))
#define NX_CRB_DEV_STATE (NETXEN_CAM_RAM(0x140))
+#define NETXEN_ULA_KEY (NETXEN_CAM_RAM(0x178))
/* MiniDIMM related macros */
#define NETXEN_DIMM_CAPABILITY (NETXEN_CAM_RAM(0x258))
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 946160fa584..db4280ce9c0 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
@@ -536,10 +534,10 @@ static void netxen_p2_nic_set_multi(struct net_device *netdev)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
struct netdev_hw_addr *ha;
- u8 null_addr[6];
+ u8 null_addr[ETH_ALEN];
int i;
- memset(null_addr, 0, 6);
+ memset(null_addr, 0, ETH_ALEN);
if (netdev->flags & IFF_PROMISC) {
@@ -648,7 +646,7 @@ nx_p3_sre_macaddr_change(struct netxen_adapter *adapter, u8 *addr, unsigned op)
mac_req = (nx_mac_req_t *)&req.words[0];
mac_req->op = op;
- memcpy(mac_req->mac_addr, addr, 6);
+ memcpy(mac_req->mac_addr, addr, ETH_ALEN);
return netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
}
@@ -663,18 +661,16 @@ static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
list_for_each(head, del_list) {
cur = list_entry(head, nx_mac_list_t, list);
- if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) {
+ if (ether_addr_equal(addr, cur->mac_addr)) {
list_move_tail(head, &adapter->mac_list);
return 0;
}
}
cur = kzalloc(sizeof(nx_mac_list_t), GFP_ATOMIC);
- if (cur == NULL) {
- printk(KERN_ERR "%s: failed to add mac address filter\n",
- adapter->netdev->name);
+ if (cur == NULL)
return -ENOMEM;
- }
+
memcpy(cur->mac_addr, addr, ETH_ALEN);
list_add_tail(&cur->list, &adapter->mac_list);
return nx_p3_sre_macaddr_change(adapter,
@@ -2568,16 +2564,10 @@ netxen_dump_fw(struct netxen_adapter *adapter)
adapter->mdump.md_capture_size;
if (!adapter->mdump.md_capture_buff) {
adapter->mdump.md_capture_buff =
- vmalloc(adapter->mdump.md_dump_size);
- if (!adapter->mdump.md_capture_buff) {
- dev_info(&adapter->pdev->dev,
- "Unable to allocate memory for minidump "
- "capture_buffer(%d bytes).\n",
- adapter->mdump.md_dump_size);
+ vzalloc(adapter->mdump.md_dump_size);
+ if (!adapter->mdump.md_capture_buff)
return;
- }
- memset(adapter->mdump.md_capture_buff, 0,
- adapter->mdump.md_dump_size);
+
if (netxen_collect_minidump(adapter)) {
adapter->mdump.has_valid_dump = 0;
adapter->mdump.md_dump_size = 0;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h
index e2c5b6f2df0..7433c4d2160 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index bc165f4d0f6..32058614151 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
@@ -27,6 +25,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
+#include <net/checksum.h>
#include "netxen_nic.h"
#include "netxen_nic_hw.h"
@@ -144,7 +143,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
buffrag->length, PCI_DMA_TODEVICE);
buffrag->dma = 0ULL;
}
- for (j = 0; j < cmd_buf->frag_count; j++) {
+ for (j = 1; j < cmd_buf->frag_count; j++) {
buffrag++;
if (buffrag->dma) {
pci_unmap_page(adapter->pdev, buffrag->dma,
@@ -197,41 +196,33 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
struct nx_host_sds_ring *sds_ring;
struct nx_host_tx_ring *tx_ring;
struct netxen_rx_buffer *rx_buf;
- int ring, i, size;
+ int ring, i;
struct netxen_cmd_buffer *cmd_buf_arr;
struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
- size = sizeof(struct nx_host_tx_ring);
- tx_ring = kzalloc(size, GFP_KERNEL);
- if (tx_ring == NULL) {
- dev_err(&pdev->dev, "%s: failed to allocate tx ring struct\n",
- netdev->name);
+ tx_ring = kzalloc(sizeof(struct nx_host_tx_ring), GFP_KERNEL);
+ if (tx_ring == NULL)
return -ENOMEM;
- }
+
adapter->tx_ring = tx_ring;
tx_ring->num_desc = adapter->num_txd;
tx_ring->txq = netdev_get_tx_queue(netdev, 0);
cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
- if (cmd_buf_arr == NULL) {
- dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n",
- netdev->name);
+ if (cmd_buf_arr == NULL)
goto err_out;
- }
+
tx_ring->cmd_buf_arr = cmd_buf_arr;
recv_ctx = &adapter->recv_ctx;
- size = adapter->max_rds_rings * sizeof (struct nx_host_rds_ring);
- rds_ring = kzalloc(size, GFP_KERNEL);
- if (rds_ring == NULL) {
- dev_err(&pdev->dev, "%s: failed to allocate rds ring struct\n",
- netdev->name);
+ rds_ring = kcalloc(adapter->max_rds_rings,
+ sizeof(struct nx_host_rds_ring), GFP_KERNEL);
+ if (rds_ring == NULL)
goto err_out;
- }
+
recv_ctx->rds_rings = rds_ring;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
@@ -1611,13 +1602,13 @@ netxen_process_lro(struct netxen_adapter *adapter,
u32 seq_number;
u8 vhdr_len = 0;
- if (unlikely(ring > adapter->max_rds_rings))
+ if (unlikely(ring >= adapter->max_rds_rings))
return NULL;
rds_ring = &recv_ctx->rds_rings[ring];
index = netxen_get_lro_sts_refhandle(sts_data0);
- if (unlikely(index > rds_ring->num_desc))
+ if (unlikely(index >= rds_ring->num_desc))
return NULL;
buffer = &rds_ring->rx_buf_arr[index];
@@ -1649,9 +1640,8 @@ netxen_process_lro(struct netxen_adapter *adapter,
th = (struct tcphdr *)((skb->data + vhdr_len) + (iph->ihl << 2));
length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ csum_replace2(&iph->check, iph->tot_len, htons(length));
iph->tot_len = htons(length);
- iph->check = 0;
- iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
th->psh = push;
th->seq = htonl(seq_number);
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 6098fd4adfe..5bf05818a12 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -14,9 +14,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution
* in the file called "COPYING".
@@ -90,7 +88,7 @@ static irqreturn_t netxen_intr(int irq, void *data);
static irqreturn_t netxen_msi_intr(int irq, void *data);
static irqreturn_t netxen_msix_intr(int irq, void *data);
-static void netxen_free_vlan_ip_list(struct netxen_adapter *);
+static void netxen_free_ip_list(struct netxen_adapter *, bool);
static void netxen_restore_indev_addr(struct net_device *dev, unsigned long);
static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *stats);
@@ -197,7 +195,7 @@ netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
netif_napi_add(netdev, &sds_ring->napi,
- netxen_nic_poll, NETXEN_NETDEV_WEIGHT);
+ netxen_nic_poll, NAPI_POLL_WEIGHT);
}
return 0;
@@ -459,16 +457,14 @@ static void netxen_pcie_strap_init(struct netxen_adapter *adapter)
static void netxen_set_msix_bit(struct pci_dev *pdev, int enable)
{
u32 control;
- int pos;
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
- if (pos) {
- pci_read_config_dword(pdev, pos, &control);
+ if (pdev->msix_cap) {
+ pci_read_config_dword(pdev, pdev->msix_cap, &control);
if (enable)
control |= PCI_MSIX_FLAGS_ENABLE;
else
control = 0;
- pci_write_config_dword(pdev, pos, control);
+ pci_write_config_dword(pdev, pdev->msix_cap, control);
}
}
@@ -501,12 +497,11 @@ netxen_read_mac_addr(struct netxen_adapter *adapter)
for (i = 0; i < 6; i++)
netdev->dev_addr[i] = *(p + 5 - i);
- memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
/* set station address */
- if (!is_valid_ether_addr(netdev->perm_addr))
+ if (!is_valid_ether_addr(netdev->dev_addr))
dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr);
return 0;
@@ -593,51 +588,64 @@ static const struct net_device_ops netxen_netdev_ops = {
#endif
};
-static void
-netxen_setup_intr(struct netxen_adapter *adapter)
+static inline bool netxen_function_zero(struct pci_dev *pdev)
{
- struct netxen_legacy_intr_set *legacy_intrp;
- struct pci_dev *pdev = adapter->pdev;
- int err, num_msix;
+ return (PCI_FUNC(pdev->devfn) == 0) ? true : false;
+}
- if (adapter->rss_supported) {
- num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
- MSIX_ENTRIES_PER_ADAPTER : 2;
- } else
- num_msix = 1;
+static inline void netxen_set_interrupt_mode(struct netxen_adapter *adapter,
+ u32 mode)
+{
+ NXWR32(adapter, NETXEN_INTR_MODE_REG, mode);
+}
- adapter->max_sds_rings = 1;
+static inline u32 netxen_get_interrupt_mode(struct netxen_adapter *adapter)
+{
+ return NXRD32(adapter, NETXEN_INTR_MODE_REG);
+}
- adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED);
+static void
+netxen_initialize_interrupt_registers(struct netxen_adapter *adapter)
+{
+ struct netxen_legacy_intr_set *legacy_intrp;
+ u32 tgt_status_reg, int_state_reg;
if (adapter->ahw.revision_id >= NX_P3_B0)
legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
else
legacy_intrp = &legacy_intr[0];
+ tgt_status_reg = legacy_intrp->tgt_status_reg;
+ int_state_reg = ISR_INT_STATE_REG;
+
adapter->int_vec_bit = legacy_intrp->int_vec_bit;
- adapter->tgt_status_reg = netxen_get_ioaddr(adapter,
- legacy_intrp->tgt_status_reg);
+ adapter->tgt_status_reg = netxen_get_ioaddr(adapter, tgt_status_reg);
adapter->tgt_mask_reg = netxen_get_ioaddr(adapter,
- legacy_intrp->tgt_mask_reg);
+ legacy_intrp->tgt_mask_reg);
adapter->pci_int_reg = netxen_get_ioaddr(adapter,
- legacy_intrp->pci_int_reg);
+ legacy_intrp->pci_int_reg);
adapter->isr_int_vec = netxen_get_ioaddr(adapter, ISR_INT_VECTOR);
if (adapter->ahw.revision_id >= NX_P3_B1)
adapter->crb_int_state_reg = netxen_get_ioaddr(adapter,
- ISR_INT_STATE_REG);
+ int_state_reg);
else
adapter->crb_int_state_reg = netxen_get_ioaddr(adapter,
- CRB_INT_VECTOR);
+ CRB_INT_VECTOR);
+}
- netxen_set_msix_bit(pdev, 0);
+static int netxen_setup_msi_interrupts(struct netxen_adapter *adapter,
+ int num_msix)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ u32 value;
+ int err;
if (adapter->msix_supported) {
-
netxen_init_msix_entries(adapter, num_msix);
- err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
- if (err == 0) {
+ err = pci_enable_msix_range(pdev, adapter->msix_entries,
+ num_msix, num_msix);
+ if (err > 0) {
adapter->flags |= NETXEN_NIC_MSIX_ENABLED;
netxen_set_msix_bit(pdev, 1);
@@ -645,26 +653,59 @@ netxen_setup_intr(struct netxen_adapter *adapter)
adapter->max_sds_rings = num_msix;
dev_info(&pdev->dev, "using msi-x interrupts\n");
- return;
+ return 0;
}
-
- if (err > 0)
- pci_disable_msix(pdev);
-
/* fall through for msi */
}
if (use_msi && !pci_enable_msi(pdev)) {
+ value = msi_tgt_status[adapter->ahw.pci_func];
adapter->flags |= NETXEN_NIC_MSI_ENABLED;
- adapter->tgt_status_reg = netxen_get_ioaddr(adapter,
- msi_tgt_status[adapter->ahw.pci_func]);
- dev_info(&pdev->dev, "using msi interrupts\n");
+ adapter->tgt_status_reg = netxen_get_ioaddr(adapter, value);
adapter->msix_entries[0].vector = pdev->irq;
- return;
+ dev_info(&pdev->dev, "using msi interrupts\n");
+ return 0;
+ }
+
+ dev_err(&pdev->dev, "Failed to acquire MSI-X/MSI interrupt vector\n");
+ return -EIO;
+}
+
+static int netxen_setup_intr(struct netxen_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int num_msix;
+
+ if (adapter->rss_supported)
+ num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
+ MSIX_ENTRIES_PER_ADAPTER : 2;
+ else
+ num_msix = 1;
+
+ adapter->max_sds_rings = 1;
+ adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED);
+
+ netxen_initialize_interrupt_registers(adapter);
+ netxen_set_msix_bit(pdev, 0);
+
+ if (netxen_function_zero(pdev)) {
+ if (!netxen_setup_msi_interrupts(adapter, num_msix))
+ netxen_set_interrupt_mode(adapter, NETXEN_MSI_MODE);
+ else
+ netxen_set_interrupt_mode(adapter, NETXEN_INTX_MODE);
+ } else {
+ if (netxen_get_interrupt_mode(adapter) == NETXEN_MSI_MODE &&
+ netxen_setup_msi_interrupts(adapter, num_msix)) {
+ dev_err(&pdev->dev, "Co-existence of MSI-X/MSI and INTx interrupts is not supported\n");
+ return -EIO;
+ }
}
- dev_info(&pdev->dev, "using legacy interrupts\n");
- adapter->msix_entries[0].vector = pdev->irq;
+ if (!NETXEN_IS_MSI_FAMILY(adapter)) {
+ adapter->msix_entries[0].vector = pdev->irq;
+ dev_info(&pdev->dev, "using legacy interrupts\n");
+ }
+ return 0;
}
static void
@@ -842,7 +883,9 @@ netxen_check_options(struct netxen_adapter *adapter)
}
if (adapter->portnum == 0) {
- get_brd_name_by_type(adapter->ahw.board_type, brd_name);
+ if (netxen_nic_get_brd_name_by_type(adapter->ahw.board_type,
+ brd_name))
+ strcpy(serial_num, "Unknown");
pr_info("%s: %s Board S/N %s Chip rev 0x%x\n",
module_name(THIS_MODULE),
@@ -861,9 +904,9 @@ netxen_check_options(struct netxen_adapter *adapter)
adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;
}
- dev_info(&pdev->dev, "firmware v%d.%d.%d [%s]\n",
- fw_major, fw_minor, fw_build,
- adapter->ahw.cut_through ? "cut-through" : "legacy");
+ dev_info(&pdev->dev, "Driver v%s, firmware v%d.%d.%d [%s]\n",
+ NETXEN_NIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build,
+ adapter->ahw.cut_through ? "cut-through" : "legacy");
if (adapter->fw_version >= NETXEN_VERSION_CODE(4, 0, 222))
adapter->capabilities = NXRD32(adapter, CRB_FW_CAPABILITIES_1);
@@ -1330,7 +1373,7 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
netxen_nic_change_mtu(netdev, netdev->mtu);
- SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
+ netdev->ethtool_ops = &netxen_nic_ethtool_ops;
netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_RXCSUM;
@@ -1346,7 +1389,7 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
}
if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX)
- netdev->hw_features |= NETIF_F_HW_VLAN_TX;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)
netdev->hw_features |= NETIF_F_LRO;
@@ -1371,6 +1414,32 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
return 0;
}
+#define NETXEN_ULA_ADAPTER_KEY (0xdaddad01)
+#define NETXEN_NON_ULA_ADAPTER_KEY (0xdaddad00)
+
+static void netxen_read_ula_info(struct netxen_adapter *adapter)
+{
+ u32 temp;
+
+ /* Print ULA info only once for an adapter */
+ if (adapter->portnum != 0)
+ return;
+
+ temp = NXRD32(adapter, NETXEN_ULA_KEY);
+ switch (temp) {
+ case NETXEN_ULA_ADAPTER_KEY:
+ dev_info(&adapter->pdev->dev, "ULA adapter");
+ break;
+ case NETXEN_NON_ULA_ADAPTER_KEY:
+ dev_info(&adapter->pdev->dev, "non ULA adapter");
+ break;
+ default:
+ break;
+ }
+
+ return;
+}
+
#ifdef CONFIG_PCIEAER
static void netxen_mask_aer_correctable(struct netxen_adapter *adapter)
{
@@ -1451,7 +1520,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&adapter->tx_clean_lock);
INIT_LIST_HEAD(&adapter->mac_list);
- INIT_LIST_HEAD(&adapter->vlan_ip_list);
+ INIT_LIST_HEAD(&adapter->ip_list);
err = netxen_setup_pci_map(adapter);
if (err)
@@ -1509,7 +1578,15 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netxen_nic_clear_stats(adapter);
- netxen_setup_intr(adapter);
+ err = netxen_setup_intr(adapter);
+
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to setup interrupts, error = %d\n", err);
+ goto err_out_disable_msi;
+ }
+
+ netxen_read_ula_info(adapter);
err = netxen_setup_netdev(adapter, netdev);
if (err)
@@ -1552,7 +1629,6 @@ err_out_free_res:
pci_release_regions(pdev);
err_out_disable_pdev:
- pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
return err;
}
@@ -1586,7 +1662,7 @@ static void netxen_nic_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->tx_timeout_task);
- netxen_free_vlan_ip_list(adapter);
+ netxen_free_ip_list(adapter, false);
netxen_nic_detach(adapter);
nx_decr_dev_ref_cnt(adapter);
@@ -1597,7 +1673,7 @@ static void netxen_nic_remove(struct pci_dev *pdev)
clear_bit(__NX_RESETTING, &adapter->state);
netxen_teardown_intr(adapter);
-
+ netxen_set_interrupt_mode(adapter, 0);
netxen_remove_diag_entries(adapter);
netxen_cleanup_pci_map(adapter);
@@ -1611,7 +1687,6 @@ static void netxen_nic_remove(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
}
@@ -1963,10 +2038,12 @@ unwind:
while (--i >= 0) {
nf = &pbuf->frag_array[i+1];
pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+ nf->dma = 0ULL;
}
nf = &pbuf->frag_array[0];
pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+ nf->dma = 0ULL;
out_err:
return -ENOMEM;
@@ -2720,7 +2797,7 @@ netxen_store_bridged_mode(struct device *dev,
if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
goto err_out;
- if (strict_strtoul(buf, 2, &new))
+ if (kstrtoul(buf, 2, &new))
goto err_out;
if (!netxen_config_bridged_mode(adapter, !!new))
@@ -2759,7 +2836,7 @@ netxen_store_diag_mode(struct device *dev,
struct netxen_adapter *adapter = dev_get_drvdata(dev);
unsigned long new;
- if (strict_strtoul(buf, 2, &new))
+ if (kstrtoul(buf, 2, &new))
return -EINVAL;
if (!!new != !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
@@ -3136,65 +3213,77 @@ netxen_destip_supported(struct netxen_adapter *adapter)
}
static void
-netxen_free_vlan_ip_list(struct netxen_adapter *adapter)
+netxen_free_ip_list(struct netxen_adapter *adapter, bool master)
{
- struct nx_vlan_ip_list *cur;
- struct list_head *head = &adapter->vlan_ip_list;
+ struct nx_ip_list *cur, *tmp_cur;
- while (!list_empty(head)) {
- cur = list_entry(head->next, struct nx_vlan_ip_list, list);
- netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN);
- list_del(&cur->list);
- kfree(cur);
+ list_for_each_entry_safe(cur, tmp_cur, &adapter->ip_list, list) {
+ if (master) {
+ if (cur->master) {
+ netxen_config_ipaddr(adapter, cur->ip_addr,
+ NX_IP_DOWN);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+ } else {
+ netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN);
+ list_del(&cur->list);
+ kfree(cur);
+ }
}
-
}
-static void
-netxen_list_config_vlan_ip(struct netxen_adapter *adapter,
+
+static bool
+netxen_list_config_ip(struct netxen_adapter *adapter,
struct in_ifaddr *ifa, unsigned long event)
{
struct net_device *dev;
- struct nx_vlan_ip_list *cur, *tmp_cur;
+ struct nx_ip_list *cur, *tmp_cur;
struct list_head *head;
+ bool ret = false;
dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
if (dev == NULL)
- return;
-
- if (!is_vlan_dev(dev))
- return;
+ goto out;
switch (event) {
case NX_IP_UP:
- list_for_each(head, &adapter->vlan_ip_list) {
- cur = list_entry(head, struct nx_vlan_ip_list, list);
+ list_for_each(head, &adapter->ip_list) {
+ cur = list_entry(head, struct nx_ip_list, list);
if (cur->ip_addr == ifa->ifa_address)
- return;
- }
-
- cur = kzalloc(sizeof(struct nx_vlan_ip_list), GFP_ATOMIC);
- if (cur == NULL) {
- printk(KERN_ERR "%s: failed to add vlan ip to list\n",
- adapter->netdev->name);
- return;
+ goto out;
}
+ cur = kzalloc(sizeof(struct nx_ip_list), GFP_ATOMIC);
+ if (cur == NULL)
+ goto out;
+ if (dev->priv_flags & IFF_802_1Q_VLAN)
+ dev = vlan_dev_real_dev(dev);
+ cur->master = !!netif_is_bond_master(dev);
cur->ip_addr = ifa->ifa_address;
- list_add_tail(&cur->list, &adapter->vlan_ip_list);
+ list_add_tail(&cur->list, &adapter->ip_list);
+ netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP);
+ ret = true;
break;
case NX_IP_DOWN:
list_for_each_entry_safe(cur, tmp_cur,
- &adapter->vlan_ip_list, list) {
+ &adapter->ip_list, list) {
if (cur->ip_addr == ifa->ifa_address) {
list_del(&cur->list);
kfree(cur);
+ netxen_config_ipaddr(adapter, ifa->ifa_address,
+ NX_IP_DOWN);
+ ret = true;
break;
}
}
}
+out:
+ return ret;
}
+
static void
netxen_config_indev_addr(struct netxen_adapter *adapter,
struct net_device *dev, unsigned long event)
@@ -3211,14 +3300,10 @@ netxen_config_indev_addr(struct netxen_adapter *adapter,
for_ifa(indev) {
switch (event) {
case NETDEV_UP:
- netxen_config_ipaddr(adapter,
- ifa->ifa_address, NX_IP_UP);
- netxen_list_config_vlan_ip(adapter, ifa, NX_IP_UP);
+ netxen_list_config_ip(adapter, ifa, NX_IP_UP);
break;
case NETDEV_DOWN:
- netxen_config_ipaddr(adapter,
- ifa->ifa_address, NX_IP_DOWN);
- netxen_list_config_vlan_ip(adapter, ifa, NX_IP_DOWN);
+ netxen_list_config_ip(adapter, ifa, NX_IP_DOWN);
break;
default:
break;
@@ -3233,23 +3318,78 @@ netxen_restore_indev_addr(struct net_device *netdev, unsigned long event)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
- struct nx_vlan_ip_list *pos, *tmp_pos;
+ struct nx_ip_list *pos, *tmp_pos;
unsigned long ip_event;
ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN;
netxen_config_indev_addr(adapter, netdev, event);
- list_for_each_entry_safe(pos, tmp_pos, &adapter->vlan_ip_list, list) {
+ list_for_each_entry_safe(pos, tmp_pos, &adapter->ip_list, list) {
netxen_config_ipaddr(adapter, pos->ip_addr, ip_event);
}
}
+static inline bool
+netxen_config_checkdev(struct net_device *dev)
+{
+ struct netxen_adapter *adapter;
+
+ if (!is_netxen_netdev(dev))
+ return false;
+ adapter = netdev_priv(dev);
+ if (!adapter)
+ return false;
+ if (!netxen_destip_supported(adapter))
+ return false;
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return false;
+
+ return true;
+}
+
+/**
+ * netxen_config_master - configure addresses based on master
+ * @dev: netxen device
+ * @event: netdev event
+ */
+static void netxen_config_master(struct net_device *dev, unsigned long event)
+{
+ struct net_device *master, *slave;
+ struct netxen_adapter *adapter = netdev_priv(dev);
+
+ rcu_read_lock();
+ master = netdev_master_upper_dev_get_rcu(dev);
+ /*
+ * This is the case where the netxen nic is being
+ * enslaved and is dev_open()ed in bond_enslave()
+ * Now we should program the bond's (and its vlans')
+ * addresses in the netxen NIC.
+ */
+ if (master && netif_is_bond_master(master) &&
+ !netif_is_bond_slave(dev)) {
+ netxen_config_indev_addr(adapter, master, event);
+ for_each_netdev_rcu(&init_net, slave)
+ if (slave->priv_flags & IFF_802_1Q_VLAN &&
+ vlan_dev_real_dev(slave) == master)
+ netxen_config_indev_addr(adapter, slave, event);
+ }
+ rcu_read_unlock();
+ /*
+ * This is the case where the netxen nic is being
+ * released and is dev_close()ed in bond_release()
+ * just before IFF_BONDING is stripped.
+ */
+ if (!master && dev->priv_flags & IFF_BONDING)
+ netxen_free_ip_list(adapter, true);
+}
+
static int netxen_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct netxen_adapter *adapter;
- struct net_device *dev = (struct net_device *)ptr;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct net_device *orig_dev = dev;
+ struct net_device *slave;
recheck:
if (dev == NULL)
@@ -3259,19 +3399,28 @@ recheck:
dev = vlan_dev_real_dev(dev);
goto recheck;
}
-
- if (!is_netxen_netdev(dev))
- goto done;
-
- adapter = netdev_priv(dev);
-
- if (!adapter)
- goto done;
-
- if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
- goto done;
-
- netxen_config_indev_addr(adapter, orig_dev, event);
+ if (event == NETDEV_UP || event == NETDEV_DOWN) {
+ /* If this is a bonding device, look for netxen-based slaves*/
+ if (netif_is_bond_master(dev)) {
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(dev, slave) {
+ if (!netxen_config_checkdev(slave))
+ continue;
+ adapter = netdev_priv(slave);
+ netxen_config_indev_addr(adapter,
+ orig_dev, event);
+ }
+ rcu_read_unlock();
+ } else {
+ if (!netxen_config_checkdev(dev))
+ goto done;
+ adapter = netdev_priv(dev);
+ /* Act only if the actual netxen is the target */
+ if (orig_dev == dev)
+ netxen_config_master(dev, event);
+ netxen_config_indev_addr(adapter, orig_dev, event);
+ }
+ }
done:
return NOTIFY_DONE;
}
@@ -3281,12 +3430,12 @@ netxen_inetaddr_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct netxen_adapter *adapter;
- struct net_device *dev;
-
+ struct net_device *dev, *slave;
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+ unsigned long ip_event;
dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
-
+ ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN;
recheck:
if (dev == NULL)
goto done;
@@ -3295,31 +3444,24 @@ recheck:
dev = vlan_dev_real_dev(dev);
goto recheck;
}
-
- if (!is_netxen_netdev(dev))
- goto done;
-
- adapter = netdev_priv(dev);
-
- if (!adapter || !netxen_destip_supported(adapter))
- goto done;
-
- if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
- goto done;
-
- switch (event) {
- case NETDEV_UP:
- netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP);
- netxen_list_config_vlan_ip(adapter, ifa, NX_IP_UP);
- break;
- case NETDEV_DOWN:
- netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_DOWN);
- netxen_list_config_vlan_ip(adapter, ifa, NX_IP_DOWN);
- break;
- default:
- break;
+ if (event == NETDEV_UP || event == NETDEV_DOWN) {
+ /* If this is a bonding device, look for netxen-based slaves*/
+ if (netif_is_bond_master(dev)) {
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(dev, slave) {
+ if (!netxen_config_checkdev(slave))
+ continue;
+ adapter = netdev_priv(slave);
+ netxen_list_config_ip(adapter, ifa, ip_event);
+ }
+ rcu_read_unlock();
+ } else {
+ if (!netxen_config_checkdev(dev))
+ goto done;
+ adapter = netdev_priv(dev);
+ netxen_list_config_ip(adapter, ifa, ip_event);
+ }
}
-
done:
return NOTIFY_DONE;
}
@@ -3336,7 +3478,7 @@ static void
netxen_restore_indev_addr(struct net_device *dev, unsigned long event)
{ }
static void
-netxen_free_vlan_ip_list(struct netxen_adapter *adapter)
+netxen_free_ip_list(struct netxen_adapter *adapter, bool master)
{ }
#endif
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 67a679aaf29..b5d6bc1a8b0 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -8,7 +8,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
@@ -312,7 +311,6 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
qdev->lrg_buffer_len);
if (unlikely(!lrg_buf_cb->skb)) {
- netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n");
qdev->lrg_buf_skb_check++;
} else {
/*
@@ -2591,13 +2589,11 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
else
qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
- qdev->lrg_buf =
- kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),
- GFP_KERNEL);
- if (qdev->lrg_buf == NULL) {
- netdev_err(qdev->ndev, "qdev->lrg_buf alloc failed\n");
+ qdev->lrg_buf = kmalloc_array(qdev->num_large_buffers,
+ sizeof(struct ql_rcv_buf_cb),
+ GFP_KERNEL);
+ if (qdev->lrg_buf == NULL)
return -ENOMEM;
- }
qdev->lrg_buf_q_alloc_virt_addr =
pci_alloc_consistent(qdev->pdev,
@@ -3842,7 +3838,7 @@ static int ql3xxx_probe(struct pci_dev *pdev,
/* Set driver entry points */
ndev->netdev_ops = &ql3xxx_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
+ ndev->ethtool_ops = &ql3xxx_ethtool_ops;
ndev->watchdog_timeo = 5 * HZ;
netif_napi_add(ndev, &qdev->napi, ql_poll, 64);
@@ -3867,7 +3863,6 @@ static int ql3xxx_probe(struct pci_dev *pdev,
ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ;
ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress);
}
- memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
@@ -3920,7 +3915,6 @@ err_out_free_regions:
pci_release_regions(pdev);
err_out_disable_pdev:
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
err_out:
return err;
}
@@ -3943,7 +3937,6 @@ static void ql3xxx_remove(struct pci_dev *pdev)
iounmap(qdev->mem_map_registers);
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
free_netdev(ndev);
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/Makefile b/drivers/net/ethernet/qlogic/qlcnic/Makefile
index c4b8ced8382..a848d297972 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/Makefile
+++ b/drivers/net/ethernet/qlogic/qlcnic/Makefile
@@ -6,4 +6,10 @@ obj-$(CONFIG_QLCNIC) := qlcnic.o
qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
qlcnic_ethtool.o qlcnic_ctx.o qlcnic_io.o \
- qlcnic_sysfs.o qlcnic_minidump.o
+ qlcnic_sysfs.o qlcnic_minidump.o qlcnic_83xx_hw.o \
+ qlcnic_83xx_init.o qlcnic_83xx_vnic.o \
+ qlcnic_minidump.o qlcnic_sriov_common.o
+
+qlcnic-$(CONFIG_QLCNIC_SRIOV) += qlcnic_sriov_pf.o
+
+qlcnic-$(CONFIG_QLCNIC_DCB) += qlcnic_dcb.o
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index bc7ec64e9c7..be618b9e874 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1,6 +1,6 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
@@ -20,10 +20,10 @@
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/firmware.h>
-
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/timer.h>
+#include <linux/irq.h>
#include <linux/vmalloc.h>
@@ -33,11 +33,14 @@
#include <linux/if_vlan.h>
#include "qlcnic_hdr.h"
+#include "qlcnic_hw.h"
+#include "qlcnic_83xx_hw.h"
+#include "qlcnic_dcb.h"
#define _QLCNIC_LINUX_MAJOR 5
-#define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 30
-#define QLCNIC_LINUX_VERSIONID "5.0.30"
+#define _QLCNIC_LINUX_MINOR 3
+#define _QLCNIC_LINUX_SUBVERSION 60
+#define QLCNIC_LINUX_VERSIONID "5.3.60"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -97,6 +100,28 @@
+ MGMT_CMD_DESC_RESV)
#define QLCNIC_MAX_TX_TIMEOUTS 2
+/* Driver will use 1 Tx ring in INT-x/MSI/SRIOV mode. */
+#define QLCNIC_SINGLE_RING 1
+#define QLCNIC_DEF_SDS_RINGS 4
+#define QLCNIC_DEF_TX_RINGS 4
+#define QLCNIC_MAX_VNIC_TX_RINGS 4
+#define QLCNIC_MAX_VNIC_SDS_RINGS 4
+#define QLCNIC_83XX_MINIMUM_VECTOR 3
+#define QLCNIC_82XX_MINIMUM_VECTOR 2
+
+enum qlcnic_queue_type {
+ QLCNIC_TX_QUEUE = 1,
+ QLCNIC_RX_QUEUE,
+};
+
+/* Operational mode for driver */
+#define QLCNIC_VNIC_MODE 0xFF
+#define QLCNIC_DEFAULT_MODE 0x0
+
+/* Virtual NIC function count */
+#define QLC_DEFAULT_VNIC_COUNT 8
+#define QLC_84XX_VNIC_COUNT 16
+
/*
* Following are the states of the Phantom. Phantom will set them and
* Host will read to check if the fields are correct.
@@ -145,11 +170,20 @@ struct cmd_desc_type0 {
__le64 addr_buffer2;
- __le16 reference_handle;
+ __le16 encap_descr; /* 15:10 offset of outer L3 header,
+ * 9:6 number of 32bit words in outer L3 header,
+ * 5 offload outer L4 checksum,
+ * 4 offload outer L3 checksum,
+ * 3 Inner L4 type, TCP=0, UDP=1,
+ * 2 Inner L3 type, IPv4=0, IPv6=1,
+ * 1 Outer L3 type,IPv4=0, IPv6=1,
+ * 0 type of encapsulation, GRE=0, VXLAN=1
+ */
__le16 mss;
u8 port_ctxid; /* 7:4 ctxid 3:0 port */
- u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */
- __le16 conn_id; /* IPSec offoad only */
+ u8 hdr_length; /* LSO only : MAC+IP+TCP Hdr size */
+ u8 outer_hdr_length; /* Encapsulation only */
+ u8 rsvd1;
__le64 addr_buffer3;
__le64 addr_buffer1;
@@ -159,7 +193,9 @@ struct cmd_desc_type0 {
__le64 addr_buffer4;
u8 eth_addr[ETH_ALEN];
- __le16 vlan_TCI;
+ __le16 vlan_TCI; /* In case of encapsulation,
+ * this is for outer VLAN
+ */
} __attribute__ ((aligned(64)));
@@ -203,6 +239,7 @@ struct uni_data_desc{
/* Flash Defines and Structures */
#define QLCNIC_FLT_LOCATION 0x3F1000
+#define QLCNIC_FDT_LOCATION 0x3F0000
#define QLCNIC_B0_FW_IMAGE_REGION 0x74
#define QLCNIC_C0_FW_IMAGE_REGION 0x97
#define QLCNIC_BOOTLD_REGION 0X72
@@ -223,6 +260,36 @@ struct qlcnic_flt_entry {
u32 end_addr;
};
+/* Flash Descriptor Table */
+struct qlcnic_fdt {
+ u32 valid;
+ u16 ver;
+ u16 len;
+ u16 cksum;
+ u16 unused;
+ u8 model[16];
+ u16 mfg_id;
+ u16 id;
+ u8 flag;
+ u8 erase_cmd;
+ u8 alt_erase_cmd;
+ u8 write_enable_cmd;
+ u8 write_enable_bits;
+ u8 write_statusreg_cmd;
+ u8 unprotected_sec_cmd;
+ u8 read_manuf_cmd;
+ u32 block_size;
+ u32 alt_block_size;
+ u32 flash_size;
+ u32 write_enable_data;
+ u8 readid_addr_len;
+ u8 write_disable_bits;
+ u8 read_dev_id_len;
+ u8 chip_erase_cmd;
+ u16 read_timeo;
+ u8 protected_sec_cmd;
+ u8 resvd[65];
+};
/* Magic number to let user know flash is programmed */
#define QLCNIC_BDINFO_MAGIC 0x12345678
@@ -267,6 +334,11 @@ struct qlcnic_flt_entry {
extern char qlcnic_driver_name[];
+extern int qlcnic_use_msi;
+extern int qlcnic_use_msi_x;
+extern int qlcnic_auto_fw_reset;
+extern int qlcnic_load_fw_file;
+
/* Number of status descriptors to handle per interrupt */
#define MAX_STATUS_HANDLE (64)
@@ -309,22 +381,32 @@ struct qlcnic_rx_buffer {
* Interrupt coalescing defaults. The defaults are for 1500 MTU. It is
* adjusted based on configured MTU.
*/
-#define QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US 3
-#define QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS 256
+#define QLCNIC_INTR_COAL_TYPE_RX 1
+#define QLCNIC_INTR_COAL_TYPE_TX 2
+#define QLCNIC_INTR_COAL_TYPE_RX_TX 3
+
+#define QLCNIC_DEF_INTR_COALESCE_RX_TIME_US 3
+#define QLCNIC_DEF_INTR_COALESCE_RX_PACKETS 256
+
+#define QLCNIC_DEF_INTR_COALESCE_TX_TIME_US 64
+#define QLCNIC_DEF_INTR_COALESCE_TX_PACKETS 64
#define QLCNIC_INTR_DEFAULT 0x04
#define QLCNIC_CONFIG_INTR_COALESCE 3
+#define QLCNIC_DEV_INFO_SIZE 2
struct qlcnic_nic_intr_coalesce {
u8 type;
u8 sts_ring_mask;
u16 rx_packets;
u16 rx_time_us;
+ u16 tx_packets;
+ u16 tx_time_us;
u16 flag;
u32 timer_out;
};
-struct qlcnic_dump_template_hdr {
+struct qlcnic_83xx_dump_template_hdr {
u32 type;
u32 offset;
u32 size;
@@ -337,15 +419,48 @@ struct qlcnic_dump_template_hdr {
u32 sys_info[3];
u32 saved_state[16];
u32 cap_sizes[8];
+ u32 ocm_wnd_reg[16];
u32 rsvd[0];
};
+struct qlcnic_82xx_dump_template_hdr {
+ u32 type;
+ u32 offset;
+ u32 size;
+ u32 cap_mask;
+ u32 num_entries;
+ u32 version;
+ u32 timestamp;
+ u32 checksum;
+ u32 drv_cap_mask;
+ u32 sys_info[3];
+ u32 saved_state[16];
+ u32 cap_sizes[8];
+ u32 rsvd[7];
+ u32 capabilities;
+ u32 rsvd1[0];
+};
+
+#define QLC_PEX_DMA_READ_SIZE (PAGE_SIZE * 16)
+
struct qlcnic_fw_dump {
u8 clr; /* flag to indicate if dump is cleared */
- u8 enable; /* enable/disable dump */
+ bool enable; /* enable/disable dump */
u32 size; /* total size of the dump */
+ u32 cap_mask; /* Current capture mask */
void *data; /* dump data area */
- struct qlcnic_dump_template_hdr *tmpl_hdr;
+ void *tmpl_hdr;
+ dma_addr_t phys_addr;
+ void *dma_buffer;
+ bool use_pex_dma;
+ /* Read only elements which are common between 82xx and 83xx
+ * template header. Update these values immediately after we read
+ * template header from Firmware
+ */
+ u32 tmpl_hdr_size;
+ u32 version;
+ u32 num_entries;
+ u32 offset;
};
/*
@@ -377,10 +492,12 @@ struct qlcnic_hardware_context {
u8 diag_test;
u8 num_msix;
u8 nic_mode;
- char diag_cnt;
+ int diag_cnt;
+ u16 max_uc_count;
u16 port_type;
u16 board_type;
+ u16 supported_type;
u16 link_speed;
u16 link_duplex;
@@ -393,15 +510,36 @@ struct qlcnic_hardware_context {
u16 max_rx_ques;
u16 max_mtu;
u32 msg_enable;
- u16 act_pci_func;
+ u16 total_nic_func;
+ u16 max_pci_func;
+ u32 max_vnic_func;
+ u32 total_pci_func;
u32 capabilities;
+ u32 extra_capability[3];
u32 temp;
u32 int_vec_bit;
u32 fw_hal_version;
+ u32 port_config;
struct qlcnic_hardware_ops *hw_ops;
struct qlcnic_nic_intr_coalesce coal;
struct qlcnic_fw_dump fw_dump;
+ struct qlcnic_fdt fdt;
+ struct qlc_83xx_reset reset;
+ struct qlc_83xx_idc idc;
+ struct qlc_83xx_fw_info *fw_info;
+ struct qlcnic_intrpt_config *intr_tbl;
+ struct qlcnic_sriov *sriov;
+ u32 *reg_tbl;
+ u32 *ext_reg_tbl;
+ u32 mbox_aen[QLC_83XX_MBX_AEN_CNT];
+ u32 mbox_reg[4];
+ struct qlcnic_mailbox *mailbox;
+ u8 extend_lb_time;
+ u8 phys_port_id[ETH_ALEN];
+ u8 lb_mode;
+ u16 vxlan_port;
+ struct device *hwmon_dev;
};
struct qlcnic_adapter_stats {
@@ -416,12 +554,17 @@ struct qlcnic_adapter_stats {
u64 txbytes;
u64 lrobytes;
u64 lso_frames;
+ u64 encap_lso_frames;
+ u64 encap_tx_csummed;
+ u64 encap_rx_csummed;
u64 xmit_on;
u64 xmit_off;
u64 skb_alloc_failure;
u64 null_rxbuf;
u64 rx_dma_map_error;
u64 tx_dma_map_error;
+ u64 spurious_intr;
+ u64 mac_filter_limit_overrun;
};
/*
@@ -447,6 +590,7 @@ struct qlcnic_host_sds_ring {
u32 num_desc;
void __iomem *crb_sts_consumer;
+ struct qlcnic_host_tx_ring *tx_ring;
struct status_desc *desc_head;
struct qlcnic_adapter *adapter;
struct napi_struct napi;
@@ -456,22 +600,42 @@ struct qlcnic_host_sds_ring {
int irq;
dma_addr_t phys_addr;
- char name[IFNAMSIZ+4];
+ char name[IFNAMSIZ + 12];
} ____cacheline_internodealigned_in_smp;
+struct qlcnic_tx_queue_stats {
+ u64 xmit_on;
+ u64 xmit_off;
+ u64 xmit_called;
+ u64 xmit_finished;
+ u64 tx_bytes;
+};
+
struct qlcnic_host_tx_ring {
+ int irq;
+ void __iomem *crb_intr_mask;
+ char name[IFNAMSIZ + 12];
u16 ctx_id;
+
+ u32 state;
u32 producer;
u32 sw_consumer;
u32 num_desc;
+
+ struct qlcnic_tx_queue_stats tx_stats;
+
void __iomem *crb_cmd_producer;
struct cmd_desc_type0 *desc_head;
+ struct qlcnic_adapter *adapter;
+ struct napi_struct napi;
struct qlcnic_cmd_buffer *cmd_buf_arr;
__le32 *hw_consumer;
dma_addr_t phys_addr;
dma_addr_t hw_cons_phys_addr;
struct netdev_queue *txq;
+ /* Lock to protect Tx descriptors cleanup */
+ spinlock_t tx_clean_lock;
} ____cacheline_internodealigned_in_smp;
/*
@@ -486,14 +650,11 @@ struct qlcnic_recv_context {
u32 state;
u16 context_id;
u16 virt_port;
-
};
/* HW context creation */
#define QLCNIC_OS_CRB_RETRY_COUNT 4000
-#define QLCNIC_CDRP_SIGNATURE_MAKE(pcifn, version) \
- (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16))
#define QLCNIC_CDRP_CMD_BIT 0x80000000
@@ -513,43 +674,6 @@ struct qlcnic_recv_context {
* the crb QLCNIC_CDRP_CRB_OFFSET.
*/
#define QLCNIC_CDRP_FORM_CMD(cmd) (QLCNIC_CDRP_CMD_BIT | (cmd))
-#define QLCNIC_CDRP_IS_CMD(cmd) (((cmd) & QLCNIC_CDRP_CMD_BIT) != 0)
-
-#define QLCNIC_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001
-#define QLCNIC_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002
-#define QLCNIC_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003
-#define QLCNIC_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004
-#define QLCNIC_CDRP_CMD_READ_MAX_RX_CTX 0x00000005
-#define QLCNIC_CDRP_CMD_READ_MAX_TX_CTX 0x00000006
-#define QLCNIC_CDRP_CMD_CREATE_RX_CTX 0x00000007
-#define QLCNIC_CDRP_CMD_DESTROY_RX_CTX 0x00000008
-#define QLCNIC_CDRP_CMD_CREATE_TX_CTX 0x00000009
-#define QLCNIC_CDRP_CMD_DESTROY_TX_CTX 0x0000000a
-#define QLCNIC_CDRP_CMD_INTRPT_TEST 0x00000011
-#define QLCNIC_CDRP_CMD_SET_MTU 0x00000012
-#define QLCNIC_CDRP_CMD_READ_PHY 0x00000013
-#define QLCNIC_CDRP_CMD_WRITE_PHY 0x00000014
-#define QLCNIC_CDRP_CMD_READ_HW_REG 0x00000015
-#define QLCNIC_CDRP_CMD_GET_FLOW_CTL 0x00000016
-#define QLCNIC_CDRP_CMD_SET_FLOW_CTL 0x00000017
-#define QLCNIC_CDRP_CMD_READ_MAX_MTU 0x00000018
-#define QLCNIC_CDRP_CMD_READ_MAX_LRO 0x00000019
-#define QLCNIC_CDRP_CMD_MAC_ADDRESS 0x0000001f
-
-#define QLCNIC_CDRP_CMD_GET_PCI_INFO 0x00000020
-#define QLCNIC_CDRP_CMD_GET_NIC_INFO 0x00000021
-#define QLCNIC_CDRP_CMD_SET_NIC_INFO 0x00000022
-#define QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY 0x00000024
-#define QLCNIC_CDRP_CMD_TOGGLE_ESWITCH 0x00000025
-#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS 0x00000026
-#define QLCNIC_CDRP_CMD_SET_PORTMIRRORING 0x00000027
-#define QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH 0x00000028
-#define QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG 0x00000029
-#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATS 0x0000002a
-#define QLCNIC_CDRP_CMD_CONFIG_PORT 0x0000002E
-#define QLCNIC_CDRP_CMD_TEMP_SIZE 0x0000002f
-#define QLCNIC_CDRP_CMD_GET_TEMP_HDR 0x00000030
-#define QLCNIC_CDRP_CMD_GET_MAC_STATS 0x00000037
#define QLCNIC_RCODE_SUCCESS 0
#define QLCNIC_RCODE_INVALID_ARGS 6
@@ -570,6 +694,7 @@ struct qlcnic_recv_context {
#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
#define QLCNIC_CAP0_VALIDOFF (1 << 11)
#define QLCNIC_CAP0_LRO_MSS (1 << 21)
+#define QLCNIC_CAP0_TX_MULTI (1 << 22)
/*
* Context state
@@ -597,7 +722,7 @@ struct qlcnic_hostrq_rds_ring {
struct qlcnic_hostrq_rx_ctx {
__le64 host_rsp_dma_addr; /* Response dma'd here */
- __le32 capabilities[4]; /* Flag bit vector */
+ __le32 capabilities[4]; /* Flag bit vector */
__le32 host_int_crb_mode; /* Interrupt crb usage */
__le32 host_rds_crb_mode; /* RDS crb usage */
/* These ring offsets are relative to data[0] below */
@@ -721,11 +846,17 @@ struct qlcnic_cardrsp_tx_ctx {
#define QLCNIC_MAC_VLAN_ADD 3
#define QLCNIC_MAC_VLAN_DEL 4
-struct qlcnic_mac_list_s {
+struct qlcnic_mac_vlan_list {
struct list_head list;
uint8_t mac_addr[ETH_ALEN+2];
+ u16 vlan_id;
};
+/* MAC Learn */
+#define NO_MAC_LEARN 0
+#define DRV_MAC_LEARN 1
+#define FDB_MAC_LEARN 2
+
#define QLCNIC_HOST_REQUEST 0x13
#define QLCNIC_REQUEST 0x14
@@ -736,6 +867,7 @@ struct qlcnic_mac_list_s {
#define QLCNIC_ILB_MODE 0x1
#define QLCNIC_ELB_MODE 0x2
+#define QLCNIC_LB_MODE_MASK 0x3
#define QLCNIC_LINKEVENT 0x1
#define QLCNIC_LB_RESPONSE 0x2
@@ -762,7 +894,8 @@ struct qlcnic_mac_list_s {
*/
#define QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK 0x8f
-#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141
+#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 0x8D
+#define QLCNIC_C2H_OPCODE_GET_DCB_AEN 0x90
#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
@@ -775,10 +908,19 @@ struct qlcnic_mac_list_s {
#define QLCNIC_FW_CAPABILITY_BDG BIT_8
#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9
#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10
+#define QLCNIC_FW_CAPABILITY_2_MULTI_TX BIT_4
#define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK BIT_27
#define QLCNIC_FW_CAPABILITY_MORE_CAPS BIT_31
#define QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG BIT_2
+#define QLCNIC_FW_CAP2_HW_LRO_IPV6 BIT_3
+#define QLCNIC_FW_CAPABILITY_SET_DRV_VER BIT_5
+#define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7
+#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG BIT_9
+
+#define QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD BIT_0
+#define QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD BIT_1
+#define QLCNIC_83XX_FW_CAPAB_ENCAP_CKO_OFFLOAD BIT_4
/* module types */
#define LINKEVENT_MODULE_NOT_PRESENT 1
@@ -855,7 +997,7 @@ struct qlcnic_ipaddr {
#define QLCNIC_MSI_ENABLED 0x02
#define QLCNIC_MSIX_ENABLED 0x04
-#define QLCNIC_LRO_ENABLED 0x08
+#define QLCNIC_LRO_ENABLED 0x01
#define QLCNIC_LRO_DISABLED 0x00
#define QLCNIC_BRIDGE_ENABLED 0X10
#define QLCNIC_DIAG_ENABLED 0x20
@@ -869,15 +1011,33 @@ struct qlcnic_ipaddr {
#define QLCNIC_FW_RESET_OWNER 0x2000
#define QLCNIC_FW_HANG 0x4000
#define QLCNIC_FW_LRO_MSS_CAP 0x8000
+#define QLCNIC_TX_INTR_SHARED 0x10000
+#define QLCNIC_APP_CHANGED_FLAGS 0x20000
+#define QLCNIC_HAS_PHYS_PORT_ID 0x40000
+#define QLCNIC_TSS_RSS 0x80000
+
+#ifdef CONFIG_QLCNIC_VXLAN
+#define QLCNIC_ADD_VXLAN_PORT 0x100000
+#define QLCNIC_DEL_VXLAN_PORT 0x200000
+#endif
+
+#define QLCNIC_VLAN_FILTERING 0x800000
+
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
+#define QLCNIC_IS_TSO_CAPABLE(adapter) \
+ ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
+
+#define QLCNIC_BEACON_EANBLE 0xC
+#define QLCNIC_BEACON_DISABLE 0xD
+
+#define QLCNIC_BEACON_ON 2
+#define QLCNIC_BEACON_OFF 0
-#define QLCNIC_DEF_NUM_STS_DESC_RINGS 4
#define QLCNIC_MSIX_TBL_SPACE 8192
#define QLCNIC_PCI_REG_MSIX_TBL 0x44
#define QLCNIC_MSIX_TBL_PGSIZE 4096
-#define QLCNIC_NETDEV_WEIGHT 128
#define QLCNIC_ADAPTER_UP_MAGIC 777
#define __QLCNIC_FW_ATTACHED 0
@@ -887,6 +1047,13 @@ struct qlcnic_ipaddr {
#define __QLCNIC_AER 5
#define __QLCNIC_DIAG_RES_ALLOC 6
#define __QLCNIC_LED_ENABLE 7
+#define __QLCNIC_ELB_INPROGRESS 8
+#define __QLCNIC_MULTI_TX_UNIQUE 9
+#define __QLCNIC_SRIOV_ENABLE 10
+#define __QLCNIC_SRIOV_CAPABLE 11
+#define __QLCNIC_MBX_POLL_ENABLE 12
+#define __QLCNIC_DIAG_MODE 13
+#define __QLCNIC_MAINTENANCE_MODE 16
#define QLCNIC_INTERRUPT_TEST 1
#define QLCNIC_LOOPBACK_TEST 2
@@ -895,24 +1062,36 @@ struct qlcnic_ipaddr {
#define QLCNIC_FILTER_AGE 80
#define QLCNIC_READD_AGE 20
#define QLCNIC_LB_MAX_FILTERS 64
-
-/* QLCNIC Driver Error Code */
-#define QLCNIC_FW_NOT_RESPOND 51
-#define QLCNIC_TEST_IN_PROGRESS 52
-#define QLCNIC_UNDEFINED_ERROR 53
-#define QLCNIC_LB_CABLE_NOT_CONN 54
+#define QLCNIC_LB_BUCKET_SIZE 32
+#define QLCNIC_ILB_MAX_RCV_LOOP 10
struct qlcnic_filter {
struct hlist_node fnode;
u8 faddr[ETH_ALEN];
- __le16 vlan_id;
+ u16 vlan_id;
unsigned long ftime;
};
struct qlcnic_filter_hash {
struct hlist_head *fhead;
u8 fnum;
- u8 fmax;
+ u16 fmax;
+ u16 fbucket_size;
+};
+
+/* Mailbox specific data structures */
+struct qlcnic_mailbox {
+ struct workqueue_struct *work_q;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_mbx_ops *ops;
+ struct work_struct work;
+ struct completion completion;
+ struct list_head cmd_q;
+ unsigned long status;
+ spinlock_t queue_lock; /* Mailbox queue lock */
+ spinlock_t aen_lock; /* Mailbox response/AEN lock */
+ atomic_t rsp_status;
+ u32 num_cmds;
};
struct qlcnic_adapter {
@@ -925,7 +1104,6 @@ struct qlcnic_adapter {
unsigned long state;
u32 flags;
- int max_drv_tx_rings;
u16 num_txd;
u16 num_rxd;
u16 num_jumbo_rxd;
@@ -933,16 +1111,28 @@ struct qlcnic_adapter {
u16 max_jumbo_rxd;
u8 max_rds_rings;
- u8 max_sds_rings;
+
+ u8 max_sds_rings; /* max sds rings supported by adapter */
+ u8 max_tx_rings; /* max tx rings supported by adapter */
+
+ u8 drv_tx_rings; /* max tx rings supported by driver */
+ u8 drv_sds_rings; /* max sds rings supported by driver */
+
+ u8 drv_tss_rings; /* tss ring input */
+ u8 drv_rss_rings; /* rss ring input */
+
+ u8 rx_csum;
u8 portnum;
u8 fw_wait_cnt;
u8 fw_fail_cnt;
u8 tx_timeo_cnt;
u8 need_fw_reset;
+ u8 reset_ctx_cnt;
u16 is_up;
- u16 pvid;
+ u16 rx_pvid;
+ u16 tx_pvid;
u32 irq;
u32 heartbeat;
@@ -954,8 +1144,11 @@ struct qlcnic_adapter {
u8 mac_addr[ETH_ALEN];
u64 dev_rst_time;
- u8 mac_learn;
+ bool drv_mac_learn;
+ bool fdb_mac_learn;
+ bool rx_mac_learn;
unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ u8 flash_mfg_id;
struct qlcnic_npar_info *npars;
struct qlcnic_eswitch *eswitch;
struct qlcnic_nic_template *nic_ops;
@@ -969,14 +1162,22 @@ struct qlcnic_adapter {
void __iomem *isr_int_vec;
struct msix_entry *msix_entries;
+ struct workqueue_struct *qlcnic_wq;
struct delayed_work fw_work;
+ struct delayed_work idc_aen_work;
+ struct delayed_work mbx_poll_work;
+ struct qlcnic_dcb *dcb;
struct qlcnic_filter_hash fhash;
+ struct qlcnic_filter_hash rx_fhash;
+ struct list_head vf_mc_list;
- spinlock_t tx_clean_lock;
spinlock_t mac_learn_lock;
+ /* spinlock protecting rcv filters learned from eswitch traffic */
+ spinlock_t rx_mac_learn_lock;
u32 file_prd_off; /*File fw product offset*/
u32 fw_version;
+ u32 offload_flags;
const struct firmware *fw;
};
@@ -995,7 +1196,28 @@ struct qlcnic_info_le {
__le16 max_rx_ques;
__le16 min_tx_bw;
__le16 max_tx_bw;
- u8 reserved2[104];
+ __le32 op_type;
+ __le16 max_bw_reg_offset;
+ __le16 max_linkspeed_reg_offset;
+ __le32 capability1;
+ __le32 capability2;
+ __le32 capability3;
+ __le16 max_tx_mac_filters;
+ __le16 max_rx_mcast_mac_filters;
+ __le16 max_rx_ucast_mac_filters;
+ __le16 max_rx_ip_addr;
+ __le16 max_rx_lro_flow;
+ __le16 max_rx_status_rings;
+ __le16 max_rx_buf_rings;
+ __le16 max_tx_vlan_keys;
+ u8 total_pf;
+ u8 total_rss_engines;
+ __le16 max_vports;
+ __le16 linkstate_reg_offset;
+ __le16 bit_offsets;
+ __le16 max_local_ipv6_addrs;
+ __le16 max_remote_ipv6_addrs;
+ u8 reserved2[56];
} __packed;
struct qlcnic_info {
@@ -1005,12 +1227,32 @@ struct qlcnic_info {
u16 switch_mode;
u32 capabilities;
u8 max_mac_filters;
- u8 reserved1;
u16 max_mtu;
u16 max_tx_ques;
u16 max_rx_ques;
u16 min_tx_bw;
u16 max_tx_bw;
+ u32 op_type;
+ u16 max_bw_reg_offset;
+ u16 max_linkspeed_reg_offset;
+ u32 capability1;
+ u32 capability2;
+ u32 capability3;
+ u16 max_tx_mac_filters;
+ u16 max_rx_mcast_mac_filters;
+ u16 max_rx_ucast_mac_filters;
+ u16 max_rx_ip_addr;
+ u16 max_rx_lro_flow;
+ u16 max_rx_status_rings;
+ u16 max_rx_buf_rings;
+ u16 max_tx_vlan_keys;
+ u8 total_pf;
+ u8 total_rss_engines;
+ u16 max_vports;
+ u16 linkstate_reg_offset;
+ u16 bit_offsets;
+ u16 max_local_ipv6_addrs;
+ u16 max_remote_ipv6_addrs;
};
struct qlcnic_pci_info_le {
@@ -1024,7 +1266,9 @@ struct qlcnic_pci_info_le {
__le16 reserved1[2];
u8 mac[ETH_ALEN];
- u8 reserved2[106];
+ __le16 func_count;
+ u8 reserved2[104];
+
} __packed;
struct qlcnic_pci_info {
@@ -1035,9 +1279,11 @@ struct qlcnic_pci_info {
u16 tx_min_bw;
u16 tx_max_bw;
u8 mac[ETH_ALEN];
+ u16 func_count;
};
struct qlcnic_npar_info {
+ bool eswitch_status;
u16 pvid;
u16 min_bw;
u16 max_bw;
@@ -1052,6 +1298,7 @@ struct qlcnic_npar_info {
u8 promisc_mode;
u8 offload_flags;
u8 pci_func;
+ u8 mac[ETH_ALEN];
};
struct qlcnic_eswitch {
@@ -1074,6 +1321,7 @@ struct qlcnic_eswitch {
#define QL_STATUS_INVALID_PARAM -1
#define MAX_BW 100 /* % of link speed */
+#define MIN_BW 1 /* % of link speed */
#define MAX_VLAN_ID 4095
#define MIN_VLAN_ID 2
#define DEFAULT_MAC_LEARN 1
@@ -1088,7 +1336,7 @@ struct qlcnic_pci_func_cfg {
u16 port_num;
u8 pci_func;
u8 func_state;
- u8 def_mac_addr[6];
+ u8 def_mac_addr[ETH_ALEN];
};
struct qlcnic_npar_func_cfg {
@@ -1257,7 +1505,6 @@ struct qlcnic_esw_statistics {
struct __qlcnic_esw_statistics tx;
};
-#define QLCNIC_DUMP_MASK_DEF 0x1f
#define QLCNIC_FORCE_FW_DUMP_KEY 0xdeadfeed
#define QLCNIC_ENABLE_FW_DUMP 0xaddfeed
#define QLCNIC_DISABLE_FW_DUMP 0xbadfeed
@@ -1266,34 +1513,40 @@ struct qlcnic_esw_statistics {
#define QLCNIC_RESET_QUIESCENT 0xadd00020
struct _cdrp_cmd {
- u32 cmd;
- u32 arg1;
- u32 arg2;
- u32 arg3;
+ u32 num;
+ u32 *arg;
};
struct qlcnic_cmd_args {
- struct _cdrp_cmd req;
- struct _cdrp_cmd rsp;
+ struct completion completion;
+ struct list_head list;
+ struct _cdrp_cmd req;
+ struct _cdrp_cmd rsp;
+ atomic_t rsp_status;
+ int pay_size;
+ u32 rsp_opcode;
+ u32 total_cmds;
+ u32 op_type;
+ u32 type;
+ u32 cmd_op;
+ u32 *hdr; /* Back channel message header */
+ u32 *pay; /* Back channel message payload */
+ u8 func_num;
};
int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
int qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config);
-
-int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off);
-int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data);
int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
-void qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *, u64, u64 *);
-void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *, u64, u64);
#define ADDR_IN_RANGE(addr, low, high) \
(((addr) < (high)) && ((addr) >= (low)))
-#define QLCRD32(adapter, off) \
- (qlcnic_hw_read_wx_2M(adapter, off))
+#define QLCRD32(adapter, off, err) \
+ (adapter->ahw->hw_ops->read_reg)(adapter, off, err)
+
#define QLCWR32(adapter, off, val) \
- (qlcnic_hw_write_wx_2M(adapter, off, val))
+ adapter->ahw->hw_ops->write_reg(adapter, off, val)
int qlcnic_pcie_sem_lock(struct qlcnic_adapter *, int, u32);
void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
@@ -1306,10 +1559,6 @@ void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
qlcnic_pcie_sem_lock((a), 3, QLCNIC_PHY_LOCK_ID)
#define qlcnic_phy_unlock(a) \
qlcnic_pcie_sem_unlock((a), 3)
-#define qlcnic_api_lock(a) \
- qlcnic_pcie_sem_lock((a), 5, 0)
-#define qlcnic_api_unlock(a) \
- qlcnic_pcie_sem_unlock((a), 5)
#define qlcnic_sw_lock(a) \
qlcnic_pcie_sem_lock((a), 6, 0)
#define qlcnic_sw_unlock(a) \
@@ -1324,14 +1573,14 @@ void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
#define MAX_CTL_CHECK 1000
-int qlcnic_get_board_info(struct qlcnic_adapter *adapter);
-int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
-int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate);
void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter);
void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter);
int qlcnic_dump_fw(struct qlcnic_adapter *);
+int qlcnic_enable_fw_dump_state(struct qlcnic_adapter *);
+bool qlcnic_check_fw_dump_state(struct qlcnic_adapter *);
/* Functions from qlcnic_init.c */
+void qlcnic_schedule_work(struct qlcnic_adapter *, work_func_t, int);
int qlcnic_load_firmware(struct qlcnic_adapter *adapter);
int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter);
void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
@@ -1356,59 +1605,49 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter);
void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter);
void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
-void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
void qlcnic_watchdog_task(struct work_struct *work);
void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
- struct qlcnic_host_rds_ring *rds_ring);
-int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max);
+ struct qlcnic_host_rds_ring *rds_ring, u8 ring_id);
void qlcnic_set_multi(struct net_device *netdev);
-void qlcnic_free_mac_list(struct qlcnic_adapter *adapter);
-int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
-int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter);
-int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable);
-int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd);
-int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable);
-void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
+int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *, u16);
+int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *);
+void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter);
+int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *);
int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
+int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *, u32);
int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
netdev_features_t qlcnic_fix_features(struct net_device *netdev,
netdev_features_t features);
int qlcnic_set_features(struct net_device *netdev, netdev_features_t features);
-int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
-int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *);
-void qlcnic_fetch_mac(u32, u32, u8, u8 *);
-void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
-void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter);
-int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode);
/* Functions from qlcnic_ethtool.c */
-int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[]);
+int qlcnic_check_loopback_buff(unsigned char *, u8 []);
+int qlcnic_do_lb_test(struct qlcnic_adapter *, u8);
/* Functions from qlcnic_main.c */
int qlcnic_reset_context(struct qlcnic_adapter *);
-void qlcnic_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *);
-void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
-int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
-netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
-int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val);
-int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data);
-void qlcnic_dev_request_reset(struct qlcnic_adapter *);
+void qlcnic_diag_free_res(struct net_device *netdev, int);
+int qlcnic_diag_alloc_res(struct net_device *netdev, int);
+netdev_tx_t qlcnic_xmit_frame(struct sk_buff *, struct net_device *);
+void qlcnic_set_tx_ring_count(struct qlcnic_adapter *, u8);
+void qlcnic_set_sds_ring_count(struct qlcnic_adapter *, u8);
+int qlcnic_setup_rings(struct qlcnic_adapter *);
+int qlcnic_validate_rings(struct qlcnic_adapter *, __u32, int);
void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
-
-/* Management functions */
-int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*);
-int qlcnic_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
-int qlcnic_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
-int qlcnic_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*);
+int qlcnic_enable_msix(struct qlcnic_adapter *, u32);
+void qlcnic_set_drv_version(struct qlcnic_adapter *);
/* eSwitch management functions */
int qlcnic_config_switch_port(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
+
int qlcnic_get_eswitch_port_config(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
int qlcnic_config_port_mirroring(struct qlcnic_adapter *, u8, u8, u8);
@@ -1418,27 +1657,51 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *, const u8, u8,
struct __qlcnic_esw_statistics *);
int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, u8, u8, u8);
int qlcnic_get_mac_stats(struct qlcnic_adapter *, struct qlcnic_mac_statistics *);
-extern int qlcnic_config_tso;
-int qlcnic_napi_add(struct qlcnic_adapter *, struct net_device *);
-void qlcnic_napi_del(struct qlcnic_adapter *adapter);
-void qlcnic_napi_enable(struct qlcnic_adapter *adapter);
-void qlcnic_napi_disable(struct qlcnic_adapter *adapter);
+void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd);
+
int qlcnic_alloc_sds_rings(struct qlcnic_recv_context *, int);
void qlcnic_free_sds_rings(struct qlcnic_recv_context *);
+void qlcnic_advert_link_change(struct qlcnic_adapter *, int);
void qlcnic_free_tx_rings(struct qlcnic_adapter *);
int qlcnic_alloc_tx_rings(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_dump_mbx(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
-void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
-void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
+void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter);
+void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter);
+
int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
void qlcnic_set_vlan_config(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
+int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *);
+void qlcnic_down(struct qlcnic_adapter *, struct net_device *);
+int qlcnic_up(struct qlcnic_adapter *, struct net_device *);
+void __qlcnic_down(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_detach(struct qlcnic_adapter *);
+void qlcnic_teardown_intr(struct qlcnic_adapter *);
+int qlcnic_attach(struct qlcnic_adapter *);
+int __qlcnic_up(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_restore_indev_addr(struct net_device *, unsigned long);
+
+int qlcnic_check_temp(struct qlcnic_adapter *);
+int qlcnic_init_pci_info(struct qlcnic_adapter *);
+int qlcnic_set_default_offload_settings(struct qlcnic_adapter *);
+int qlcnic_reset_npar_config(struct qlcnic_adapter *);
+int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *);
+int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter);
+int qlcnic_read_mac_addr(struct qlcnic_adapter *);
+int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
+void qlcnic_set_netdev_features(struct qlcnic_adapter *,
+ struct qlcnic_esw_func_cfg *);
+void qlcnic_sriov_vf_set_multi(struct net_device *);
+int qlcnic_is_valid_nic_func(struct qlcnic_adapter *, u8);
+int qlcnic_get_pci_func_type(struct qlcnic_adapter *, u16, u16 *, u16 *,
+ u16 *);
/*
* QLOGIC Board information
@@ -1462,30 +1725,555 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
tx_ring->producer;
}
-static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
+struct qlcnic_nic_template {
+ int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
+ int (*config_led) (struct qlcnic_adapter *, u32, u32);
+ int (*start_firmware) (struct qlcnic_adapter *);
+ int (*init_driver) (struct qlcnic_adapter *);
+ void (*request_reset) (struct qlcnic_adapter *, u32);
+ void (*cancel_idc_work) (struct qlcnic_adapter *);
+ int (*napi_add)(struct qlcnic_adapter *, struct net_device *);
+ void (*napi_del)(struct qlcnic_adapter *);
+ void (*config_ipaddr)(struct qlcnic_adapter *, __be32, int);
+ irqreturn_t (*clear_legacy_intr)(struct qlcnic_adapter *);
+ int (*shutdown)(struct pci_dev *);
+ int (*resume)(struct qlcnic_adapter *);
+};
+
+struct qlcnic_mbx_ops {
+ int (*enqueue_cmd) (struct qlcnic_adapter *,
+ struct qlcnic_cmd_args *, unsigned long *);
+ void (*dequeue_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+ void (*decode_resp) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+ void (*encode_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+ void (*nofity_fw) (struct qlcnic_adapter *, u8);
+};
+
+int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *);
+void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *);
+void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx);
+void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx);
+void qlcnic_update_stats(struct qlcnic_adapter *);
+
+/* Adapter hardware abstraction */
+struct qlcnic_hardware_ops {
+ void (*read_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
+ void (*write_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
+ int (*read_reg) (struct qlcnic_adapter *, ulong, int *);
+ int (*write_reg) (struct qlcnic_adapter *, ulong, u32);
+ void (*get_ocm_win) (struct qlcnic_hardware_context *);
+ int (*get_mac_address) (struct qlcnic_adapter *, u8 *, u8);
+ int (*setup_intr) (struct qlcnic_adapter *);
+ int (*alloc_mbx_args)(struct qlcnic_cmd_args *,
+ struct qlcnic_adapter *, u32);
+ int (*mbx_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+ void (*get_func_no) (struct qlcnic_adapter *);
+ int (*api_lock) (struct qlcnic_adapter *);
+ void (*api_unlock) (struct qlcnic_adapter *);
+ void (*add_sysfs) (struct qlcnic_adapter *);
+ void (*remove_sysfs) (struct qlcnic_adapter *);
+ void (*process_lb_rcv_ring_diag) (struct qlcnic_host_sds_ring *);
+ int (*create_rx_ctx) (struct qlcnic_adapter *);
+ int (*create_tx_ctx) (struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *, int);
+ void (*del_rx_ctx) (struct qlcnic_adapter *);
+ void (*del_tx_ctx) (struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
+ int (*setup_link_event) (struct qlcnic_adapter *, int);
+ int (*get_nic_info) (struct qlcnic_adapter *, struct qlcnic_info *, u8);
+ int (*get_pci_info) (struct qlcnic_adapter *, struct qlcnic_pci_info *);
+ int (*set_nic_info) (struct qlcnic_adapter *, struct qlcnic_info *);
+ int (*change_macvlan) (struct qlcnic_adapter *, u8*, u16, u8);
+ void (*napi_enable) (struct qlcnic_adapter *);
+ void (*napi_disable) (struct qlcnic_adapter *);
+ int (*config_intr_coal) (struct qlcnic_adapter *,
+ struct ethtool_coalesce *);
+ int (*config_rss) (struct qlcnic_adapter *, int);
+ int (*config_hw_lro) (struct qlcnic_adapter *, int);
+ int (*config_loopback) (struct qlcnic_adapter *, u8);
+ int (*clear_loopback) (struct qlcnic_adapter *, u8);
+ int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
+ void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16);
+ int (*get_board_info) (struct qlcnic_adapter *);
+ void (*set_mac_filter_count) (struct qlcnic_adapter *);
+ void (*free_mac_list) (struct qlcnic_adapter *);
+ int (*read_phys_port_id) (struct qlcnic_adapter *);
+ pci_ers_result_t (*io_error_detected) (struct pci_dev *,
+ pci_channel_state_t);
+ pci_ers_result_t (*io_slot_reset) (struct pci_dev *);
+ void (*io_resume) (struct pci_dev *);
+ void (*get_beacon_state)(struct qlcnic_adapter *);
+ void (*enable_sds_intr) (struct qlcnic_adapter *,
+ struct qlcnic_host_sds_ring *);
+ void (*disable_sds_intr) (struct qlcnic_adapter *,
+ struct qlcnic_host_sds_ring *);
+ void (*enable_tx_intr) (struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
+ void (*disable_tx_intr) (struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
+ u32 (*get_saved_state)(void *, u32);
+ void (*set_saved_state)(void *, u32, u32);
+ void (*cache_tmpl_hdr_values)(struct qlcnic_fw_dump *);
+ u32 (*get_cap_size)(void *, int);
+ void (*set_sys_info)(void *, int, u32);
+ void (*store_cap_mask)(void *, u32);
+};
+
+extern struct qlcnic_nic_template qlcnic_vf_ops;
+
+static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->extra_capability[0] &
+ QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD;
+}
+
+static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->extra_capability[0] &
+ QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD;
+}
+
+static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter)
+{
+ return adapter->nic_ops->start_firmware(adapter);
+}
+
+static inline void qlcnic_read_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ adapter->ahw->hw_ops->read_crb(adapter, buf, offset, size);
+}
+
+static inline void qlcnic_write_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ adapter->ahw->hw_ops->write_crb(adapter, buf, offset, size);
+}
+
+static inline int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter,
+ ulong off, u32 data)
+{
+ return adapter->ahw->hw_ops->write_reg(adapter, off, data);
+}
+
+static inline int qlcnic_get_mac_address(struct qlcnic_adapter *adapter,
+ u8 *mac, u8 function)
+{
+ return adapter->ahw->hw_ops->get_mac_address(adapter, mac, function);
+}
+
+static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->setup_intr(adapter);
+}
+
+static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
+ struct qlcnic_adapter *adapter, u32 arg)
+{
+ return adapter->ahw->hw_ops->alloc_mbx_args(mbx, adapter, arg);
+}
+
+static inline int qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (adapter->ahw->hw_ops->mbx_cmd)
+ return adapter->ahw->hw_ops->mbx_cmd(adapter, cmd);
+
+ return -EIO;
+}
+
+static inline void qlcnic_get_func_no(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->get_func_no(adapter);
+}
+
+static inline int qlcnic_api_lock(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->api_lock(adapter);
+}
+
+static inline void qlcnic_api_unlock(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->api_unlock(adapter);
+}
+
+static inline void qlcnic_add_sysfs(struct qlcnic_adapter *adapter)
+{
+ if (adapter->ahw->hw_ops->add_sysfs)
+ adapter->ahw->hw_ops->add_sysfs(adapter);
+}
+
+static inline void qlcnic_remove_sysfs(struct qlcnic_adapter *adapter)
+{
+ if (adapter->ahw->hw_ops->remove_sysfs)
+ adapter->ahw->hw_ops->remove_sysfs(adapter);
+}
+
+static inline void
+qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+{
+ sds_ring->adapter->ahw->hw_ops->process_lb_rcv_ring_diag(sds_ring);
+}
+
+static inline int qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->create_rx_ctx(adapter);
+}
+
+static inline int qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *ptr,
+ int ring)
+{
+ return adapter->ahw->hw_ops->create_tx_ctx(adapter, ptr, ring);
+}
+
+static inline void qlcnic_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->del_rx_ctx(adapter);
+}
+
+static inline void qlcnic_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *ptr)
+{
+ return adapter->ahw->hw_ops->del_tx_ctx(adapter, ptr);
+}
+
+static inline int qlcnic_linkevent_request(struct qlcnic_adapter *adapter,
+ int enable)
+{
+ return adapter->ahw->hw_ops->setup_link_event(adapter, enable);
+}
+
+static inline int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *info, u8 id)
+{
+ return adapter->ahw->hw_ops->get_nic_info(adapter, info, id);
+}
+
+static inline int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_pci_info *info)
+{
+ return adapter->ahw->hw_ops->get_pci_info(adapter, info);
+}
+
+static inline int qlcnic_set_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *info)
+{
+ return adapter->ahw->hw_ops->set_nic_info(adapter, info);
+}
+
+static inline int qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter,
+ u8 *addr, u16 id, u8 cmd)
+{
+ return adapter->ahw->hw_ops->change_macvlan(adapter, addr, id, cmd);
+}
+
+static inline int qlcnic_napi_add(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
+{
+ return adapter->nic_ops->napi_add(adapter, netdev);
+}
+
+static inline void qlcnic_napi_del(struct qlcnic_adapter *adapter)
+{
+ adapter->nic_ops->napi_del(adapter);
+}
+
+static inline void qlcnic_napi_enable(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->napi_enable(adapter);
+}
+
+static inline int __qlcnic_shutdown(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+
+ return adapter->nic_ops->shutdown(pdev);
+}
+
+static inline int __qlcnic_resume(struct qlcnic_adapter *adapter)
+{
+ return adapter->nic_ops->resume(adapter);
+}
+
+static inline void qlcnic_napi_disable(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->napi_disable(adapter);
+}
+
+static inline int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter,
+ struct ethtool_coalesce *ethcoal)
+{
+ return adapter->ahw->hw_ops->config_intr_coal(adapter, ethcoal);
+}
+
+static inline int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
+{
+ return adapter->ahw->hw_ops->config_rss(adapter, enable);
+}
+
+static inline int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter,
+ int enable)
+{
+ return adapter->ahw->hw_ops->config_hw_lro(adapter, enable);
+}
+
+static inline int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ return adapter->ahw->hw_ops->config_loopback(adapter, mode);
+}
+
+static inline int qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ return adapter->ahw->hw_ops->clear_loopback(adapter, mode);
+}
+
+static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter,
+ u32 mode)
+{
+ return adapter->ahw->hw_ops->config_promisc_mode(adapter, mode);
+}
+
+static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter,
+ u64 *addr, u16 id)
+{
+ adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id);
+}
+
+static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->get_board_info(adapter);
+}
+
+static inline void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->free_mac_list(adapter);
+}
+
+static inline void qlcnic_set_mac_filter_count(struct qlcnic_adapter *adapter)
+{
+ if (adapter->ahw->hw_ops->set_mac_filter_count)
+ adapter->ahw->hw_ops->set_mac_filter_count(adapter);
+}
+
+static inline void qlcnic_get_beacon_state(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->get_beacon_state(adapter);
+}
+
+static inline void qlcnic_read_phys_port_id(struct qlcnic_adapter *adapter)
+{
+ if (adapter->ahw->hw_ops->read_phys_port_id)
+ adapter->ahw->hw_ops->read_phys_port_id(adapter);
+}
+
+static inline u32 qlcnic_get_saved_state(struct qlcnic_adapter *adapter,
+ void *t_hdr, u32 index)
+{
+ return adapter->ahw->hw_ops->get_saved_state(t_hdr, index);
+}
+
+static inline void qlcnic_set_saved_state(struct qlcnic_adapter *adapter,
+ void *t_hdr, u32 index, u32 value)
+{
+ adapter->ahw->hw_ops->set_saved_state(t_hdr, index, value);
+}
+
+static inline void qlcnic_cache_tmpl_hdr_values(struct qlcnic_adapter *adapter,
+ struct qlcnic_fw_dump *fw_dump)
+{
+ adapter->ahw->hw_ops->cache_tmpl_hdr_values(fw_dump);
+}
+
+static inline u32 qlcnic_get_cap_size(struct qlcnic_adapter *adapter,
+ void *tmpl_hdr, int index)
+{
+ return adapter->ahw->hw_ops->get_cap_size(tmpl_hdr, index);
+}
+
+static inline void qlcnic_set_sys_info(struct qlcnic_adapter *adapter,
+ void *tmpl_hdr, int idx, u32 value)
+{
+ adapter->ahw->hw_ops->set_sys_info(tmpl_hdr, idx, value);
+}
+
+static inline void qlcnic_store_cap_mask(struct qlcnic_adapter *adapter,
+ void *tmpl_hdr, u32 mask)
+{
+ adapter->ahw->hw_ops->store_cap_mask(tmpl_hdr, mask);
+}
+
+static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
+ u32 key)
+{
+ if (adapter->nic_ops->request_reset)
+ adapter->nic_ops->request_reset(adapter, key);
+}
+
+static inline void qlcnic_cancel_idc_work(struct qlcnic_adapter *adapter)
+{
+ if (adapter->nic_ops->cancel_idc_work)
+ adapter->nic_ops->cancel_idc_work(adapter);
+}
+
+static inline irqreturn_t
+qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
+{
+ return adapter->nic_ops->clear_legacy_intr(adapter);
+}
+
+static inline int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state,
+ u32 rate)
+{
+ return adapter->nic_ops->config_led(adapter, state, rate);
+}
+
+static inline void qlcnic_config_ipaddr(struct qlcnic_adapter *adapter,
+ __be32 ip, int cmd)
+{
+ adapter->nic_ops->config_ipaddr(adapter, ip, cmd);
+}
+
+static inline bool qlcnic_check_multi_tx(struct qlcnic_adapter *adapter)
+{
+ return test_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+}
+
+static inline void
+qlcnic_82xx_enable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ writel(0x0, tx_ring->crb_intr_mask);
+}
+
+static inline void
+qlcnic_82xx_disable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ writel(1, tx_ring->crb_intr_mask);
+}
+
+static inline void
+qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ writel(0, tx_ring->crb_intr_mask);
+}
+
+static inline void
+qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ writel(1, tx_ring->crb_intr_mask);
+}
+
+/* Enable MSI-x and INT-x interrupts */
+static inline void
+qlcnic_83xx_enable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
{
writel(0, sds_ring->crb_intr_mask);
}
-static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
+/* Disable MSI-x and INT-x interrupts */
+static inline void
+qlcnic_83xx_disable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ writel(1, sds_ring->crb_intr_mask);
+}
+
+static inline void qlcnic_disable_multi_tx(struct qlcnic_adapter *adapter)
+{
+ test_and_clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+ adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
+}
+
+/* When operating in multi Tx mode, the driver needs to write 0x1
+ * to the source register, instead of 0x0, to disable receiving interrupts.
+ */
+static inline void
+qlcnic_82xx_disable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED))
+ writel(0x1, sds_ring->crb_intr_mask);
+ else
+ writel(0, sds_ring->crb_intr_mask);
+}
+
+static inline void qlcnic_enable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
{
- struct qlcnic_adapter *adapter = sds_ring->adapter;
+ if (adapter->ahw->hw_ops->enable_sds_intr)
+ adapter->ahw->hw_ops->enable_sds_intr(adapter, sds_ring);
+}
- writel(0x1, sds_ring->crb_intr_mask);
+static inline void
+qlcnic_disable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ if (adapter->ahw->hw_ops->disable_sds_intr)
+ adapter->ahw->hw_ops->disable_sds_intr(adapter, sds_ring);
+}
+
+static inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (adapter->ahw->hw_ops->enable_tx_intr)
+ adapter->ahw->hw_ops->enable_tx_intr(adapter, tx_ring);
+}
+
+static inline void qlcnic_disable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (adapter->ahw->hw_ops->disable_tx_intr)
+ adapter->ahw->hw_ops->disable_tx_intr(adapter, tx_ring);
+}
+
+/* When operating in multi Tx mode, the driver needs to write 0x0
+ * to the source register, instead of 0x1, to enable receiving interrupts.
+ */
+static inline void
+qlcnic_82xx_enable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED))
+ writel(0, sds_ring->crb_intr_mask);
+ else
+ writel(0x1, sds_ring->crb_intr_mask);
if (!QLCNIC_IS_MSI_FAMILY(adapter))
writel(0xfbff, adapter->tgt_mask_reg);
}
+static inline int qlcnic_get_diag_lock(struct qlcnic_adapter *adapter)
+{
+ return test_and_set_bit(__QLCNIC_DIAG_MODE, &adapter->state);
+}
+
+static inline void qlcnic_release_diag_lock(struct qlcnic_adapter *adapter)
+{
+ clear_bit(__QLCNIC_DIAG_MODE, &adapter->state);
+}
+
+static inline int qlcnic_check_diag_status(struct qlcnic_adapter *adapter)
+{
+ return test_bit(__QLCNIC_DIAG_MODE, &adapter->state);
+}
+
+extern const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops;
extern const struct ethtool_ops qlcnic_ethtool_ops;
extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
-struct qlcnic_nic_template {
- int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
- int (*config_led) (struct qlcnic_adapter *, u32, u32);
- int (*start_firmware) (struct qlcnic_adapter *);
-};
-
#define QLCDB(adapter, lvl, _fmt, _args...) do { \
if (NETIF_MSG_##lvl & adapter->ahw->msg_enable) \
printk(KERN_INFO "%s: %s: " _fmt, \
@@ -1493,11 +2281,98 @@ struct qlcnic_nic_template {
__func__, ##_args); \
} while (0)
-#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
+#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
+#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030
+#define PCI_DEVICE_ID_QLOGIC_VF_QLE834X 0x8430
+#define PCI_DEVICE_ID_QLOGIC_QLE844X 0x8040
+#define PCI_DEVICE_ID_QLOGIC_VF_QLE844X 0x8440
+
static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
{
unsigned short device = adapter->pdev->device;
return (device == PCI_DEVICE_ID_QLOGIC_QLE824X) ? true : false;
}
+static inline bool qlcnic_84xx_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+
+ return ((device == PCI_DEVICE_ID_QLOGIC_QLE844X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X)) ? true : false;
+}
+
+static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+ bool status;
+
+ status = ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_QLE844X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false;
+
+ return status;
+}
+
+static inline bool qlcnic_sriov_pf_check(struct qlcnic_adapter *adapter)
+{
+ return (adapter->ahw->op_mode == QLCNIC_SRIOV_PF_FUNC) ? true : false;
+}
+
+static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+ bool status;
+
+ status = ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X)) ? true : false;
+
+ return status;
+}
+
+static inline bool qlcnic_83xx_pf_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+
+ return (device == PCI_DEVICE_ID_QLOGIC_QLE834X) ? true : false;
+}
+
+static inline bool qlcnic_83xx_vf_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+
+ return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
+}
+
+static inline bool qlcnic_sriov_check(struct qlcnic_adapter *adapter)
+{
+ bool status;
+
+ status = (qlcnic_sriov_pf_check(adapter) ||
+ qlcnic_sriov_vf_check(adapter)) ? true : false;
+
+ return status;
+}
+
+static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_84xx_check(adapter))
+ return QLC_84XX_VNIC_COUNT;
+ else
+ return QLC_DEFAULT_VNIC_COUNT;
+}
+
+#ifdef CONFIG_QLCNIC_HWMON
+void qlcnic_register_hwmon_dev(struct qlcnic_adapter *);
+void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *);
+#else
+static inline void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+ return;
+}
+static inline void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+ return;
+}
+#endif
#endif /* __QLCNIC_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
new file mode 100644
index 00000000000..a4a4ec0b68f
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -0,0 +1,4065 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic.h"
+#include "qlcnic_sriov.h"
+#include <linux/if_vlan.h>
+#include <linux/ipv6.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/aer.h>
+
+static void __qlcnic_83xx_process_aen(struct qlcnic_adapter *);
+static int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *, u8);
+static void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
+ struct qlcnic_cmd_args *);
+static int qlcnic_83xx_get_port_config(struct qlcnic_adapter *);
+static irqreturn_t qlcnic_83xx_handle_aen(int, void *);
+static pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *,
+ pci_channel_state_t);
+static int qlcnic_83xx_set_port_config(struct qlcnic_adapter *);
+static pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *);
+static void qlcnic_83xx_io_resume(struct pci_dev *);
+static int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *, u8);
+static void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *);
+static int qlcnic_83xx_resume(struct qlcnic_adapter *);
+static int qlcnic_83xx_shutdown(struct pci_dev *);
+static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *);
+
+#define RSS_HASHTYPE_IP_TCP 0x3
+#define QLC_83XX_FW_MBX_CMD 0
+#define QLC_SKIP_INACTIVE_PCI_REGS 7
+#define QLC_MAX_LEGACY_FUNC_SUPP 8
+
+static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
+ {QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
+ {QLCNIC_CMD_CONFIG_INTRPT, 18, 34},
+ {QLCNIC_CMD_CREATE_RX_CTX, 136, 27},
+ {QLCNIC_CMD_DESTROY_RX_CTX, 2, 1},
+ {QLCNIC_CMD_CREATE_TX_CTX, 54, 18},
+ {QLCNIC_CMD_DESTROY_TX_CTX, 2, 1},
+ {QLCNIC_CMD_CONFIGURE_MAC_LEARNING, 2, 1},
+ {QLCNIC_CMD_INTRPT_TEST, 22, 12},
+ {QLCNIC_CMD_SET_MTU, 3, 1},
+ {QLCNIC_CMD_READ_PHY, 4, 2},
+ {QLCNIC_CMD_WRITE_PHY, 5, 1},
+ {QLCNIC_CMD_READ_HW_REG, 4, 1},
+ {QLCNIC_CMD_GET_FLOW_CTL, 4, 2},
+ {QLCNIC_CMD_SET_FLOW_CTL, 4, 1},
+ {QLCNIC_CMD_READ_MAX_MTU, 4, 2},
+ {QLCNIC_CMD_READ_MAX_LRO, 4, 2},
+ {QLCNIC_CMD_MAC_ADDRESS, 4, 3},
+ {QLCNIC_CMD_GET_PCI_INFO, 1, 129},
+ {QLCNIC_CMD_GET_NIC_INFO, 2, 19},
+ {QLCNIC_CMD_SET_NIC_INFO, 32, 1},
+ {QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3},
+ {QLCNIC_CMD_TOGGLE_ESWITCH, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_STATUS, 4, 3},
+ {QLCNIC_CMD_SET_PORTMIRRORING, 4, 1},
+ {QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3},
+ {QLCNIC_CMD_GET_ESWITCH_STATS, 5, 1},
+ {QLCNIC_CMD_CONFIG_PORT, 4, 1},
+ {QLCNIC_CMD_TEMP_SIZE, 1, 4},
+ {QLCNIC_CMD_GET_TEMP_HDR, 5, 5},
+ {QLCNIC_CMD_GET_LINK_EVENT, 2, 1},
+ {QLCNIC_CMD_CONFIG_MAC_VLAN, 4, 3},
+ {QLCNIC_CMD_CONFIG_INTR_COAL, 6, 1},
+ {QLCNIC_CMD_CONFIGURE_RSS, 14, 1},
+ {QLCNIC_CMD_CONFIGURE_LED, 2, 1},
+ {QLCNIC_CMD_CONFIGURE_MAC_RX_MODE, 2, 1},
+ {QLCNIC_CMD_CONFIGURE_HW_LRO, 2, 1},
+ {QLCNIC_CMD_GET_STATISTICS, 2, 80},
+ {QLCNIC_CMD_SET_PORT_CONFIG, 2, 1},
+ {QLCNIC_CMD_GET_PORT_CONFIG, 2, 2},
+ {QLCNIC_CMD_GET_LINK_STATUS, 2, 4},
+ {QLCNIC_CMD_IDC_ACK, 5, 1},
+ {QLCNIC_CMD_INIT_NIC_FUNC, 3, 1},
+ {QLCNIC_CMD_STOP_NIC_FUNC, 2, 1},
+ {QLCNIC_CMD_SET_LED_CONFIG, 5, 1},
+ {QLCNIC_CMD_GET_LED_CONFIG, 1, 5},
+ {QLCNIC_CMD_83XX_SET_DRV_VER, 4, 1},
+ {QLCNIC_CMD_ADD_RCV_RINGS, 130, 26},
+ {QLCNIC_CMD_CONFIG_VPORT, 4, 4},
+ {QLCNIC_CMD_BC_EVENT_SETUP, 2, 1},
+ {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
+ {QLCNIC_CMD_DCB_QUERY_PARAM, 1, 50},
+ {QLCNIC_CMD_SET_INGRESS_ENCAP, 2, 1},
+};
+
+const u32 qlcnic_83xx_ext_reg_tbl[] = {
+ 0x38CC, /* Global Reset */
+ 0x38F0, /* Wildcard */
+ 0x38FC, /* Informant */
+ 0x3038, /* Host MBX ctrl */
+ 0x303C, /* FW MBX ctrl */
+ 0x355C, /* BOOT LOADER ADDRESS REG */
+ 0x3560, /* BOOT LOADER SIZE REG */
+ 0x3564, /* FW IMAGE ADDR REG */
+ 0x1000, /* MBX intr enable */
+ 0x1200, /* Default Intr mask */
+ 0x1204, /* Default Interrupt ID */
+ 0x3780, /* QLC_83XX_IDC_MAJ_VERSION */
+ 0x3784, /* QLC_83XX_IDC_DEV_STATE */
+ 0x3788, /* QLC_83XX_IDC_DRV_PRESENCE */
+ 0x378C, /* QLC_83XX_IDC_DRV_ACK */
+ 0x3790, /* QLC_83XX_IDC_CTRL */
+ 0x3794, /* QLC_83XX_IDC_DRV_AUDIT */
+ 0x3798, /* QLC_83XX_IDC_MIN_VERSION */
+ 0x379C, /* QLC_83XX_RECOVER_DRV_LOCK */
+ 0x37A0, /* QLC_83XX_IDC_PF_0 */
+ 0x37A4, /* QLC_83XX_IDC_PF_1 */
+ 0x37A8, /* QLC_83XX_IDC_PF_2 */
+ 0x37AC, /* QLC_83XX_IDC_PF_3 */
+ 0x37B0, /* QLC_83XX_IDC_PF_4 */
+ 0x37B4, /* QLC_83XX_IDC_PF_5 */
+ 0x37B8, /* QLC_83XX_IDC_PF_6 */
+ 0x37BC, /* QLC_83XX_IDC_PF_7 */
+ 0x37C0, /* QLC_83XX_IDC_PF_8 */
+ 0x37C4, /* QLC_83XX_IDC_PF_9 */
+ 0x37C8, /* QLC_83XX_IDC_PF_10 */
+ 0x37CC, /* QLC_83XX_IDC_PF_11 */
+ 0x37D0, /* QLC_83XX_IDC_PF_12 */
+ 0x37D4, /* QLC_83XX_IDC_PF_13 */
+ 0x37D8, /* QLC_83XX_IDC_PF_14 */
+ 0x37DC, /* QLC_83XX_IDC_PF_15 */
+ 0x37E0, /* QLC_83XX_IDC_DEV_PARTITION_INFO_1 */
+ 0x37E4, /* QLC_83XX_IDC_DEV_PARTITION_INFO_2 */
+ 0x37F0, /* QLC_83XX_DRV_OP_MODE */
+ 0x37F4, /* QLC_83XX_VNIC_STATE */
+ 0x3868, /* QLC_83XX_DRV_LOCK */
+ 0x386C, /* QLC_83XX_DRV_UNLOCK */
+ 0x3504, /* QLC_83XX_DRV_LOCK_ID */
+ 0x34A4, /* QLC_83XX_ASIC_TEMP */
+};
+
+const u32 qlcnic_83xx_reg_tbl[] = {
+ 0x34A8, /* PEG_HALT_STAT1 */
+ 0x34AC, /* PEG_HALT_STAT2 */
+ 0x34B0, /* FW_HEARTBEAT */
+ 0x3500, /* FLASH LOCK_ID */
+ 0x3528, /* FW_CAPABILITIES */
+ 0x3538, /* Driver active, DRV_REG0 */
+ 0x3540, /* Device state, DRV_REG1 */
+ 0x3544, /* Driver state, DRV_REG2 */
+ 0x3548, /* Driver scratch, DRV_REG3 */
+ 0x354C, /* Device partition info, DRV_REG4 */
+ 0x3524, /* Driver IDC ver, DRV_REG5 */
+ 0x3550, /* FW_VER_MAJOR */
+ 0x3554, /* FW_VER_MINOR */
+ 0x3558, /* FW_VER_SUB */
+ 0x359C, /* NPAR STATE */
+ 0x35FC, /* FW_IMG_VALID */
+ 0x3650, /* CMD_PEG_STATE */
+ 0x373C, /* RCV_PEG_STATE */
+ 0x37B4, /* ASIC TEMP */
+ 0x356C, /* FW API */
+ 0x3570, /* DRV OP MODE */
+ 0x3850, /* FLASH LOCK */
+ 0x3854, /* FLASH UNLOCK */
+};
+
+static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
+ .read_crb = qlcnic_83xx_read_crb,
+ .write_crb = qlcnic_83xx_write_crb,
+ .read_reg = qlcnic_83xx_rd_reg_indirect,
+ .write_reg = qlcnic_83xx_wrt_reg_indirect,
+ .get_mac_address = qlcnic_83xx_get_mac_address,
+ .setup_intr = qlcnic_83xx_setup_intr,
+ .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args,
+ .mbx_cmd = qlcnic_83xx_issue_cmd,
+ .get_func_no = qlcnic_83xx_get_func_no,
+ .api_lock = qlcnic_83xx_cam_lock,
+ .api_unlock = qlcnic_83xx_cam_unlock,
+ .add_sysfs = qlcnic_83xx_add_sysfs,
+ .remove_sysfs = qlcnic_83xx_remove_sysfs,
+ .process_lb_rcv_ring_diag = qlcnic_83xx_process_rcv_ring_diag,
+ .create_rx_ctx = qlcnic_83xx_create_rx_ctx,
+ .create_tx_ctx = qlcnic_83xx_create_tx_ctx,
+ .del_rx_ctx = qlcnic_83xx_del_rx_ctx,
+ .del_tx_ctx = qlcnic_83xx_del_tx_ctx,
+ .setup_link_event = qlcnic_83xx_setup_link_event,
+ .get_nic_info = qlcnic_83xx_get_nic_info,
+ .get_pci_info = qlcnic_83xx_get_pci_info,
+ .set_nic_info = qlcnic_83xx_set_nic_info,
+ .change_macvlan = qlcnic_83xx_sre_macaddr_change,
+ .napi_enable = qlcnic_83xx_napi_enable,
+ .napi_disable = qlcnic_83xx_napi_disable,
+ .config_intr_coal = qlcnic_83xx_config_intr_coal,
+ .config_rss = qlcnic_83xx_config_rss,
+ .config_hw_lro = qlcnic_83xx_config_hw_lro,
+ .config_promisc_mode = qlcnic_83xx_nic_set_promisc,
+ .change_l2_filter = qlcnic_83xx_change_l2_filter,
+ .get_board_info = qlcnic_83xx_get_port_info,
+ .set_mac_filter_count = qlcnic_83xx_set_mac_filter_count,
+ .free_mac_list = qlcnic_82xx_free_mac_list,
+ .io_error_detected = qlcnic_83xx_io_error_detected,
+ .io_slot_reset = qlcnic_83xx_io_slot_reset,
+ .io_resume = qlcnic_83xx_io_resume,
+ .get_beacon_state = qlcnic_83xx_get_beacon_state,
+ .enable_sds_intr = qlcnic_83xx_enable_sds_intr,
+ .disable_sds_intr = qlcnic_83xx_disable_sds_intr,
+ .enable_tx_intr = qlcnic_83xx_enable_tx_intr,
+ .disable_tx_intr = qlcnic_83xx_disable_tx_intr,
+ .get_saved_state = qlcnic_83xx_get_saved_state,
+ .set_saved_state = qlcnic_83xx_set_saved_state,
+ .cache_tmpl_hdr_values = qlcnic_83xx_cache_tmpl_hdr_values,
+ .get_cap_size = qlcnic_83xx_get_cap_size,
+ .set_sys_info = qlcnic_83xx_set_sys_info,
+ .store_cap_mask = qlcnic_83xx_store_cap_mask,
+};
+
+static struct qlcnic_nic_template qlcnic_83xx_ops = {
+ .config_bridged_mode = qlcnic_config_bridged_mode,
+ .config_led = qlcnic_config_led,
+ .request_reset = qlcnic_83xx_idc_request_reset,
+ .cancel_idc_work = qlcnic_83xx_idc_exit,
+ .napi_add = qlcnic_83xx_napi_add,
+ .napi_del = qlcnic_83xx_napi_del,
+ .config_ipaddr = qlcnic_83xx_config_ipaddr,
+ .clear_legacy_intr = qlcnic_83xx_clear_legacy_intr,
+ .shutdown = qlcnic_83xx_shutdown,
+ .resume = qlcnic_83xx_resume,
+};
+
+void qlcnic_83xx_register_map(struct qlcnic_hardware_context *ahw)
+{
+ ahw->hw_ops = &qlcnic_83xx_hw_ops;
+ ahw->reg_tbl = (u32 *)qlcnic_83xx_reg_tbl;
+ ahw->ext_reg_tbl = (u32 *)qlcnic_83xx_ext_reg_tbl;
+}
+
+int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *adapter)
+{
+ u32 fw_major, fw_minor, fw_build;
+ struct pci_dev *pdev = adapter->pdev;
+
+ fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
+ adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
+
+ dev_info(&pdev->dev, "Driver v%s, firmware version %d.%d.%d\n",
+ QLCNIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build);
+
+ return adapter->fw_version;
+}
+
+static int __qlcnic_set_win_base(struct qlcnic_adapter *adapter, u32 addr)
+{
+ void __iomem *base;
+ u32 val;
+
+ base = adapter->ahw->pci_base0 +
+ QLC_83XX_CRB_WIN_FUNC(adapter->ahw->pci_func);
+ writel(addr, base);
+ val = readl(base);
+ if (val != addr)
+ return -EIO;
+
+ return 0;
+}
+
+int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
+ int *err)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ *err = __qlcnic_set_win_base(adapter, (u32) addr);
+ if (!*err) {
+ return QLCRDX(ahw, QLCNIC_WILDCARD);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "%s failed, addr = 0x%lx\n", __func__, addr);
+ return -EIO;
+ }
+}
+
+int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
+ u32 data)
+{
+ int err;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ err = __qlcnic_set_win_base(adapter, (u32) addr);
+ if (!err) {
+ QLCWRX(ahw, QLCNIC_WILDCARD, data);
+ return 0;
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "%s failed, addr = 0x%x data = 0x%x\n",
+ __func__, (int)addr, data);
+ return err;
+ }
+}
+
+static void qlcnic_83xx_enable_legacy(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ /* MSI-X enablement failed, use legacy interrupt */
+ adapter->tgt_status_reg = ahw->pci_base0 + QLC_83XX_INTX_PTR;
+ adapter->tgt_mask_reg = ahw->pci_base0 + QLC_83XX_INTX_MASK;
+ adapter->isr_int_vec = ahw->pci_base0 + QLC_83XX_INTX_TRGR;
+ adapter->msix_entries[0].vector = adapter->pdev->irq;
+ dev_info(&adapter->pdev->dev, "using legacy interrupt\n");
+}
+
+static int qlcnic_83xx_calculate_msix_vector(struct qlcnic_adapter *adapter)
+{
+ int num_msix;
+
+ num_msix = adapter->drv_sds_rings;
+
+ /* account for the AEN interrupt vector used with MSI-X based interrupts */
+ num_msix += 1;
+
+ if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
+ num_msix += adapter->drv_tx_rings;
+
+ return num_msix;
+}
+
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err, i, num_msix;
+
+ if (adapter->flags & QLCNIC_TSS_RSS) {
+ err = qlcnic_setup_tss_rss_intr(adapter);
+ if (err < 0)
+ return err;
+ num_msix = ahw->num_msix;
+ } else {
+ num_msix = qlcnic_83xx_calculate_msix_vector(adapter);
+
+ err = qlcnic_enable_msix(adapter, num_msix);
+ if (err == -ENOMEM)
+ return err;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ num_msix = ahw->num_msix;
+ } else {
+ if (qlcnic_sriov_vf_check(adapter))
+ return -EINVAL;
+ num_msix = 1;
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
+ adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
+ }
+ }
+
+ /* setup interrupt mapping table for fw */
+ ahw->intr_tbl = vzalloc(num_msix *
+ sizeof(struct qlcnic_intrpt_config));
+ if (!ahw->intr_tbl)
+ return -ENOMEM;
+
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ if (adapter->ahw->pci_func >= QLC_MAX_LEGACY_FUNC_SUPP) {
+ dev_err(&adapter->pdev->dev, "PCI function number 8 and higher are not supported with legacy interrupt, func 0x%x\n",
+ ahw->pci_func);
+ return -EOPNOTSUPP;
+ }
+
+ qlcnic_83xx_enable_legacy(adapter);
+ }
+
+ for (i = 0; i < num_msix; i++) {
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ ahw->intr_tbl[i].type = QLCNIC_INTRPT_MSIX;
+ else
+ ahw->intr_tbl[i].type = QLCNIC_INTRPT_INTX;
+ ahw->intr_tbl[i].id = i;
+ ahw->intr_tbl[i].src = 0;
+ }
+
+ return 0;
+}
+
+static inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter)
+{
+ writel(0, adapter->tgt_mask_reg);
+}
+
+static inline void qlcnic_83xx_set_legacy_intr_mask(struct qlcnic_adapter *adapter)
+{
+ if (adapter->tgt_mask_reg)
+ writel(1, adapter->tgt_mask_reg);
+}
+
+static inline void qlcnic_83xx_enable_legacy_msix_mbx_intr(struct qlcnic_adapter
+ *adapter)
+{
+ u32 mask;
+
+ /* Mailbox in MSI-X mode and legacy interrupt share the same
+ * source register. We could be here before contexts are created
+ * and sds_ring->crb_intr_mask has not been initialized, so
+ * calculate the BAR offset for the interrupt source register.
+ */
+ mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
+ writel(0, adapter->ahw->pci_base0 + mask);
+}
+
+void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *adapter)
+{
+ u32 mask;
+
+ mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
+ writel(1, adapter->ahw->pci_base0 + mask);
+ QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, 0);
+}
+
+static inline void qlcnic_83xx_get_mbx_data(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ int i;
+
+ if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP)
+ return;
+
+ for (i = 0; i < cmd->rsp.num; i++)
+ cmd->rsp.arg[i] = readl(QLCNIC_MBX_FW(adapter->ahw, i));
+}
+
+irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
+{
+ u32 intr_val;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int retries = 0;
+
+ intr_val = readl(adapter->tgt_status_reg);
+
+ if (!QLC_83XX_VALID_INTX_BIT31(intr_val))
+ return IRQ_NONE;
+
+ if (QLC_83XX_INTX_FUNC(intr_val) != adapter->ahw->pci_func) {
+ adapter->stats.spurious_intr++;
+ return IRQ_NONE;
+ }
+ /* The barrier is required to ensure ordering of writes to the registers */
+ wmb();
+
+ /* clear the interrupt trigger control register */
+ writel(0, adapter->isr_int_vec);
+ intr_val = readl(adapter->isr_int_vec);
+ do {
+ intr_val = readl(adapter->tgt_status_reg);
+ if (QLC_83XX_INTX_FUNC(intr_val) != ahw->pci_func)
+ break;
+ retries++;
+ } while (QLC_83XX_VALID_INTX_BIT30(intr_val) &&
+ (retries < QLC_83XX_LEGACY_INTX_MAX_RETRY));
+
+ return IRQ_HANDLED;
+}
+
+static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx)
+{
+ atomic_set(&mbx->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
+ complete(&mbx->completion);
+}
+
+static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
+{
+ u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mbx->aen_lock, flags);
+ resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
+ if (!(resp & QLCNIC_SET_OWNER))
+ goto out;
+
+ event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
+ if (event & QLCNIC_MBX_ASYNC_EVENT) {
+ __qlcnic_83xx_process_aen(adapter);
+ } else {
+ if (atomic_read(&mbx->rsp_status) != rsp_status)
+ qlcnic_83xx_notify_mbx_response(mbx);
+ }
+out:
+ qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
+ spin_unlock_irqrestore(&mbx->aen_lock, flags);
+}
+
+irqreturn_t qlcnic_83xx_intr(int irq, void *data)
+{
+ struct qlcnic_adapter *adapter = data;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (qlcnic_83xx_clear_legacy_intr(adapter) == IRQ_NONE)
+ return IRQ_NONE;
+
+ qlcnic_83xx_poll_process_aen(adapter);
+
+ if (ahw->diag_test) {
+ if (ahw->diag_test == QLCNIC_INTERRUPT_TEST)
+ ahw->diag_cnt++;
+ qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
+ return IRQ_HANDLED;
+ }
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
+ } else {
+ sds_ring = &adapter->recv_ctx->sds_rings[0];
+ napi_schedule(&sds_ring->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t qlcnic_83xx_tmp_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ goto done;
+
+ if (adapter->nic_ops->clear_legacy_intr(adapter) == IRQ_NONE)
+ return IRQ_NONE;
+
+done:
+ adapter->ahw->diag_cnt++;
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+
+ return IRQ_HANDLED;
+}
+
+void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *adapter)
+{
+ u32 num_msix;
+
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+ qlcnic_83xx_set_legacy_intr_mask(adapter);
+
+ qlcnic_83xx_disable_mbx_intr(adapter);
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ num_msix = adapter->ahw->num_msix - 1;
+ else
+ num_msix = 0;
+
+ msleep(20);
+
+ if (adapter->msix_entries) {
+ synchronize_irq(adapter->msix_entries[num_msix].vector);
+ free_irq(adapter->msix_entries[num_msix].vector, adapter);
+ }
+}
+
+int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter)
+{
+ irq_handler_t handler;
+ u32 val;
+ int err = 0;
+ unsigned long flags = 0;
+
+ if (!(adapter->flags & QLCNIC_MSI_ENABLED) &&
+ !(adapter->flags & QLCNIC_MSIX_ENABLED))
+ flags |= IRQF_SHARED;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ handler = qlcnic_83xx_handle_aen;
+ val = adapter->msix_entries[adapter->ahw->num_msix - 1].vector;
+ err = request_irq(val, handler, flags, "qlcnic-MB", adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "failed to register MBX interrupt\n");
+ return err;
+ }
+ } else {
+ handler = qlcnic_83xx_intr;
+ val = adapter->msix_entries[0].vector;
+ err = request_irq(val, handler, flags, "qlcnic", adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "failed to register INTx interrupt\n");
+ return err;
+ }
+ qlcnic_83xx_clear_legacy_intr_mask(adapter);
+ }
+
+ /* Enable mailbox interrupt */
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
+
+ return err;
+}
+
+void qlcnic_83xx_get_func_no(struct qlcnic_adapter *adapter)
+{
+ u32 val = QLCRDX(adapter->ahw, QLCNIC_INFORMANT);
+ adapter->ahw->pci_func = (val >> 24) & 0xff;
+}
+
+int qlcnic_83xx_cam_lock(struct qlcnic_adapter *adapter)
+{
+ void __iomem *addr;
+ u32 val, limit = 0;
+
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ addr = ahw->pci_base0 + QLC_83XX_SEM_LOCK_FUNC(ahw->pci_func);
+ do {
+ val = readl(addr);
+ if (val) {
+ /* write the function number to register */
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER,
+ ahw->pci_func);
+ return 0;
+ }
+ usleep_range(1000, 2000);
+ } while (++limit <= QLCNIC_PCIE_SEM_TIMEOUT);
+
+ return -EIO;
+}
+
+void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *adapter)
+{
+ void __iomem *addr;
+ u32 val;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ addr = ahw->pci_base0 + QLC_83XX_SEM_UNLOCK_FUNC(ahw->pci_func);
+ val = readl(addr);
+}
+
+void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ int ret = 0;
+ u32 data;
+
+ if (qlcnic_api_lock(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to acquire lock. addr offset 0x%x\n",
+ __func__, (u32)offset);
+ return;
+ }
+
+ data = QLCRD32(adapter, (u32) offset, &ret);
+ qlcnic_api_unlock(adapter);
+
+ if (ret == -EIO) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed. addr offset 0x%x\n",
+ __func__, (u32)offset);
+ return;
+ }
+ memcpy(buf, &data, size);
+}
+
+void qlcnic_83xx_write_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ u32 data;
+
+ memcpy(&data, buf, size);
+ qlcnic_83xx_wrt_reg_indirect(adapter, (u32) offset, data);
+}
+
+int qlcnic_83xx_get_port_info(struct qlcnic_adapter *adapter)
+{
+ int status;
+
+ status = qlcnic_83xx_get_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Get Port Info failed\n");
+ } else {
+ if (QLC_83XX_SFP_10G_CAPABLE(adapter->ahw->port_config))
+ adapter->ahw->port_type = QLCNIC_XGBE;
+ else
+ adapter->ahw->port_type = QLCNIC_GBE;
+
+ if (QLC_83XX_AUTONEG(adapter->ahw->port_config))
+ adapter->ahw->link_autoneg = AUTONEG_ENABLE;
+ }
+ return status;
+}
+
+static void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u16 act_pci_fn = ahw->total_nic_func;
+ u16 count;
+
+ ahw->max_mc_count = QLC_83XX_MAX_MC_COUNT;
+ if (act_pci_fn <= 2)
+ count = (QLC_83XX_MAX_UC_COUNT - QLC_83XX_MAX_MC_COUNT) /
+ act_pci_fn;
+ else
+ count = (QLC_83XX_LB_MAX_FILTERS - QLC_83XX_MAX_MC_COUNT) /
+ act_pci_fn;
+ ahw->max_uc_count = count;
+}
+
+void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ val = BIT_2 | ((adapter->ahw->num_msix - 1) << 8);
+ else
+ val = BIT_2;
+
+ QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, val);
+ qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
+}
+
+void qlcnic_83xx_check_vf(struct qlcnic_adapter *adapter,
+ const struct pci_device_id *ent)
+{
+ u32 op_mode, priv_level;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ ahw->fw_hal_version = 2;
+ qlcnic_get_func_no(adapter);
+
+ if (qlcnic_sriov_vf_check(adapter)) {
+ qlcnic_sriov_vf_set_ops(adapter);
+ return;
+ }
+
+ /* Determine function privilege level */
+ op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
+ if (op_mode == QLC_83XX_DEFAULT_OPMODE)
+ priv_level = QLCNIC_MGMT_FUNC;
+ else
+ priv_level = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode,
+ ahw->pci_func);
+
+ if (priv_level == QLCNIC_NON_PRIV_FUNC) {
+ ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d Non Privileged function\n",
+ ahw->fw_hal_version);
+ adapter->nic_ops = &qlcnic_vf_ops;
+ } else {
+ if (pci_find_ext_capability(adapter->pdev,
+ PCI_EXT_CAP_ID_SRIOV))
+ set_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state);
+ adapter->nic_ops = &qlcnic_83xx_ops;
+ }
+}
+
+static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
+ u32 data[]);
+static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
+ u32 data[]);
+
+void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ int i;
+
+ if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP)
+ return;
+
+ dev_info(&adapter->pdev->dev,
+ "Host MBX regs(%d)\n", cmd->req.num);
+ for (i = 0; i < cmd->req.num; i++) {
+ if (i && !(i % 8))
+ pr_info("\n");
+ pr_info("%08x ", cmd->req.arg[i]);
+ }
+ pr_info("\n");
+ dev_info(&adapter->pdev->dev,
+ "FW MBX regs(%d)\n", cmd->rsp.num);
+ for (i = 0; i < cmd->rsp.num; i++) {
+ if (i && !(i % 8))
+ pr_info("\n");
+ pr_info("%08x ", cmd->rsp.arg[i]);
+ }
+ pr_info("\n");
+}
+
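+/* Busy-wait for a QLC_83XX_MBX_CMD_BUSY_WAIT command: poll the response
+ * status for up to total_cmds * QLC_83XX_MBX_CMD_LOOP iterations and flush
+ * the mailbox workqueue if no response arrives.
+ */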
+static void qlcnic_83xx_poll_for_mbx_completion(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int opcode = LSW(cmd->req.arg[0]);
+ unsigned long max_loops;
+
+ max_loops = cmd->total_cmds * QLC_83XX_MBX_CMD_LOOP;
+
+ for (; max_loops; max_loops--) {
+ if (atomic_read(&cmd->rsp_status) ==
+ QLC_83XX_MBX_RESPONSE_ARRIVED)
+ return;
+
+ udelay(1);
+ }
+
+ dev_err(&adapter->pdev->dev,
+ "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, opcode, cmd->type, ahw->pci_func, ahw->op_mode);
+ flush_workqueue(ahw->mailbox->work_q);
+}
+
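+/* Queue a mailbox command and complete it according to its type: sleep on
+ * a completion with timeout (CMD_WAIT), return immediately (CMD_NO_WAIT),
+ * or busy-poll for the response (CMD_BUSY_WAIT).
+ */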
+int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int cmd_type, err, opcode;
+ unsigned long timeout;
+
+ if (!mbx)
+ return -EIO;
+
+ opcode = LSW(cmd->req.arg[0]);
+ cmd_type = cmd->type;
+ err = mbx->ops->enqueue_cmd(adapter, cmd, &timeout);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Mailbox not available, cmd_op=0x%x, cmd_context=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, opcode, cmd->type, ahw->pci_func,
+ ahw->op_mode);
+ return err;
+ }
+
+ switch (cmd_type) {
+ case QLC_83XX_MBX_CMD_WAIT:
+ if (!wait_for_completion_timeout(&cmd->completion, timeout)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, opcode, cmd_type, ahw->pci_func,
+ ahw->op_mode);
+ flush_workqueue(mbx->work_q);
+ }
+ break;
+ case QLC_83XX_MBX_CMD_NO_WAIT:
+ return 0;
+ case QLC_83XX_MBX_CMD_BUSY_WAIT:
+ qlcnic_83xx_poll_for_mbx_completion(adapter, cmd);
+ break;
+ default:
+ dev_err(&adapter->pdev->dev,
+ "%s: Invalid mailbox command, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, opcode, cmd_type, ahw->pci_func,
+ ahw->op_mode);
+ qlcnic_83xx_detach_mailbox_work(adapter);
+ }
+
+ return cmd->rsp_opcode;
+}
+
+int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
+ struct qlcnic_adapter *adapter, u32 type)
+{
+ int i, size;
+ u32 temp;
+ const struct qlcnic_mailbox_metadata *mbx_tbl;
+
+ memset(mbx, 0, sizeof(struct qlcnic_cmd_args));
+ mbx_tbl = qlcnic_83xx_mbx_tbl;
+ size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl);
+ for (i = 0; i < size; i++) {
+ if (type == mbx_tbl[i].cmd) {
+ mbx->op_type = QLC_83XX_FW_MBX_CMD;
+ mbx->req.num = mbx_tbl[i].in_args;
+ mbx->rsp.num = mbx_tbl[i].out_args;
+ mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
+ GFP_ATOMIC);
+ if (!mbx->req.arg)
+ return -ENOMEM;
+ mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32),
+ GFP_ATOMIC);
+ if (!mbx->rsp.arg) {
+ kfree(mbx->req.arg);
+ mbx->req.arg = NULL;
+ return -ENOMEM;
+ }
+ memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
+ memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
+ temp = adapter->ahw->fw_hal_version << 29;
+ mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp);
+ mbx->cmd_op = type;
+ return 0;
+ }
+ }
+
+ dev_err(&adapter->pdev->dev, "%s: Invalid mailbox command opcode 0x%x\n",
+ __func__, type);
+ return -EINVAL;
+}
+
+void qlcnic_83xx_idc_aen_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_cmd_args cmd;
+ int i, err = 0;
+
+ adapter = container_of(work, struct qlcnic_adapter, idc_aen_work.work);
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_IDC_ACK);
+ if (err)
+ return;
+
+ for (i = 1; i < QLC_83XX_MBX_AEN_CNT; i++)
+ cmd.req.arg[i] = adapter->ahw->mbox_aen[i];
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev,
+ "%s: Mailbox IDC ACK failed.\n", __func__);
+ qlcnic_free_mbx_args(&cmd);
+}
+
+static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
+ u32 data[])
+{
+ dev_dbg(&adapter->pdev->dev, "Completion AEN:0x%x.\n",
+ QLCNIC_MBX_RSP(data[0]));
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &adapter->ahw->idc.status);
+}
+
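+/* Read the AEN payload from the firmware mailbox registers, dispatch it by
+ * event code (link change, IDC completion/request, loopback time extend,
+ * BC, SFP, DCBX), then hand mailbox ownership back to the firmware.
+ */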
+static void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 event[QLC_83XX_MBX_AEN_CNT];
+ int i;
+
+ for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++)
+ event[i] = readl(QLCNIC_MBX_FW(ahw, i));
+
+ switch (QLCNIC_MBX_RSP(event[0])) {
+ case QLCNIC_MBX_LINK_EVENT:
+ qlcnic_83xx_handle_link_aen(adapter, event);
+ break;
+ case QLCNIC_MBX_COMP_EVENT:
+ qlcnic_83xx_handle_idc_comp_aen(adapter, event);
+ break;
+ case QLCNIC_MBX_REQUEST_EVENT:
+ for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++)
+ adapter->ahw->mbox_aen[i] = QLCNIC_MBX_RSP(event[i]);
+ queue_delayed_work(adapter->qlcnic_wq,
+ &adapter->idc_aen_work, 0);
+ break;
+ case QLCNIC_MBX_TIME_EXTEND_EVENT:
+ ahw->extend_lb_time = event[1] >> 8 & 0xf;
+ break;
+ case QLCNIC_MBX_BC_EVENT:
+ qlcnic_sriov_handle_bc_event(adapter, event[1]);
+ break;
+ case QLCNIC_MBX_SFP_INSERT_EVENT:
+ dev_info(&adapter->pdev->dev, "SFP+ Insert AEN:0x%x.\n",
+ QLCNIC_MBX_RSP(event[0]));
+ break;
+ case QLCNIC_MBX_SFP_REMOVE_EVENT:
+ dev_info(&adapter->pdev->dev, "SFP Removed AEN:0x%x.\n",
+ QLCNIC_MBX_RSP(event[0]));
+ break;
+ case QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT:
+ qlcnic_dcb_aen_handler(adapter->dcb, (void *)&event[1]);
+ break;
+ default:
+ dev_dbg(&adapter->pdev->dev, "Unsupported AEN:0x%x.\n",
+ QLCNIC_MBX_RSP(event[0]));
+ break;
+ }
+
+ QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
+}
+
+static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
+{
+ u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mbx->aen_lock, flags);
+ resp = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
+ if (resp & QLCNIC_SET_OWNER) {
+ event = readl(QLCNIC_MBX_FW(ahw, 0));
+ if (event & QLCNIC_MBX_ASYNC_EVENT) {
+ __qlcnic_83xx_process_aen(adapter);
+ } else {
+ if (atomic_read(&mbx->rsp_status) != rsp_status)
+ qlcnic_83xx_notify_mbx_response(mbx);
+ }
+ }
+ spin_unlock_irqrestore(&mbx->aen_lock, flags);
+}
+
+static void qlcnic_83xx_mbx_poll_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter;
+
+ adapter = container_of(work, struct qlcnic_adapter, mbx_poll_work.work);
+
+ if (!test_bit(__QLCNIC_MBX_POLL_ENABLE, &adapter->state))
+ return;
+
+ qlcnic_83xx_process_aen(adapter);
+ queue_delayed_work(adapter->qlcnic_wq, &adapter->mbx_poll_work,
+ (HZ / 10));
+}
+
+void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *adapter)
+{
+ if (test_and_set_bit(__QLCNIC_MBX_POLL_ENABLE, &adapter->state))
+ return;
+
+ INIT_DELAYED_WORK(&adapter->mbx_poll_work, qlcnic_83xx_mbx_poll_work);
+ queue_delayed_work(adapter->qlcnic_wq, &adapter->mbx_poll_work, 0);
+}
+
+void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *adapter)
+{
+ if (!test_and_clear_bit(__QLCNIC_MBX_POLL_ENABLE, &adapter->state))
+ return;
+ cancel_delayed_work_sync(&adapter->mbx_poll_work);
+}
+
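+/* Register the status (SDS) rings that did not fit into the CREATE_RX_CTX
+ * command (ring index 8 onward) through the ADD_RCV_RINGS mailbox and map
+ * the returned consumer and interrupt mask registers.
+ */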
+static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
+{
+ int index, i, err, sds_mbx_size;
+ u32 *buf, intrpt_id, intr_mask;
+ u16 context_id;
+ u8 num_sds;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_host_sds_ring *sds;
+ struct qlcnic_sds_mbx sds_mbx;
+ struct qlcnic_add_rings_mbx_out *mbx_out;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
+ context_id = recv_ctx->context_id;
+ num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS;
+ ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_ADD_RCV_RINGS);
+ cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16);
+
+ /* set up status rings, mbx 2-81 */
+ index = 2;
+ for (i = 8; i < adapter->drv_sds_rings; i++) {
+ memset(&sds_mbx, 0, sds_mbx_size);
+ sds = &recv_ctx->sds_rings[i];
+ sds->consumer = 0;
+ memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds));
+ sds_mbx.phy_addr_low = LSD(sds->phys_addr);
+ sds_mbx.phy_addr_high = MSD(sds->phys_addr);
+ sds_mbx.sds_ring_size = sds->num_desc;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intrpt_id = ahw->intr_tbl[i].id;
+ else
+ intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ sds_mbx.intrpt_id = intrpt_id;
+ else
+ sds_mbx.intrpt_id = 0xffff;
+ sds_mbx.intrpt_val = 0;
+ buf = &cmd.req.arg[index];
+ memcpy(buf, &sds_mbx, sds_mbx_size);
+ index += sds_mbx_size / sizeof(u32);
+ }
+
+ /* send the mailbox command */
+ err = ahw->hw_ops->mbx_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to add rings %d\n", err);
+ goto out;
+ }
+
+ mbx_out = (struct qlcnic_add_rings_mbx_out *)&cmd.rsp.arg[1];
+ index = 0;
+ /* status descriptor ring */
+ for (i = 8; i < adapter->drv_sds_rings; i++) {
+ sds = &recv_ctx->sds_rings[i];
+ sds->crb_sts_consumer = ahw->pci_base0 +
+ mbx_out->host_csmr[index];
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intr_mask = ahw->intr_tbl[i].src;
+ else
+ intr_mask = QLCRDX(ahw, QLCNIC_DEF_INT_MASK);
+
+ sds->crb_intr_mask = ahw->pci_base0 + intr_mask;
+ index++;
+ }
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+void qlcnic_83xx_del_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ int err;
+ u32 temp = 0;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX))
+ return;
+
+ if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
+ cmd.req.arg[0] |= (0x3 << 29);
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_pf_set_interface_id_del_rx_ctx(adapter, &temp);
+
+ cmd.req.arg[1] = recv_ctx->context_id | temp;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to destroy rx ctx in firmware\n");
+
+ recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
+ qlcnic_free_mbx_args(&cmd);
+}
+
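+/* Create the receive context in firmware: describe up to QLCNIC_MAX_SDS_RINGS
+ * status rings plus the regular and jumbo RDS rings in the CREATE_RX_CTX
+ * mailbox, record the returned context id and producer/consumer registers,
+ * and add any remaining SDS rings with qlcnic_83xx_add_rings().
+ */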
+int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ int i, err, index, sds_mbx_size, rds_mbx_size;
+ u8 num_sds, num_rds;
+ u32 *buf, intrpt_id, intr_mask, cap = 0;
+ struct qlcnic_host_sds_ring *sds;
+ struct qlcnic_host_rds_ring *rds;
+ struct qlcnic_sds_mbx sds_mbx;
+ struct qlcnic_rds_mbx rds_mbx;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_rcv_mbx_out *mbx_out;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ num_rds = adapter->max_rds_rings;
+
+ if (adapter->drv_sds_rings <= QLCNIC_MAX_SDS_RINGS)
+ num_sds = adapter->drv_sds_rings;
+ else
+ num_sds = QLCNIC_MAX_SDS_RINGS;
+
+ sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
+ rds_mbx_size = sizeof(struct qlcnic_rds_mbx);
+ cap = QLCNIC_CAP0_LEGACY_CONTEXT;
+
+ if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
+ cap |= QLC_83XX_FW_CAP_LRO_MSS;
+
+ /* set mailbox hdr and capabilities */
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_CREATE_RX_CTX);
+ if (err)
+ return err;
+
+ if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
+ cmd.req.arg[0] |= (0x3 << 29);
+
+ cmd.req.arg[1] = cap;
+ cmd.req.arg[5] = 1 | (num_rds << 5) | (num_sds << 8) |
+ (QLC_83XX_HOST_RDS_MODE_UNIQUE << 16);
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_pf_set_interface_id_create_rx_ctx(adapter,
+ &cmd.req.arg[6]);
+ /* set up status rings, mbx 8-57/87 */
+ index = QLC_83XX_HOST_SDS_MBX_IDX;
+ for (i = 0; i < num_sds; i++) {
+ memset(&sds_mbx, 0, sds_mbx_size);
+ sds = &recv_ctx->sds_rings[i];
+ sds->consumer = 0;
+ memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds));
+ sds_mbx.phy_addr_low = LSD(sds->phys_addr);
+ sds_mbx.phy_addr_high = MSD(sds->phys_addr);
+ sds_mbx.sds_ring_size = sds->num_desc;
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intrpt_id = ahw->intr_tbl[i].id;
+ else
+ intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ sds_mbx.intrpt_id = intrpt_id;
+ else
+ sds_mbx.intrpt_id = 0xffff;
+ sds_mbx.intrpt_val = 0;
+ buf = &cmd.req.arg[index];
+ memcpy(buf, &sds_mbx, sds_mbx_size);
+ index += sds_mbx_size / sizeof(u32);
+ }
+ /* set up receive rings, mbx 88-111/135 */
+ index = QLCNIC_HOST_RDS_MBX_IDX;
+ rds = &recv_ctx->rds_rings[0];
+ rds->producer = 0;
+ memset(&rds_mbx, 0, rds_mbx_size);
+ rds_mbx.phy_addr_reg_low = LSD(rds->phys_addr);
+ rds_mbx.phy_addr_reg_high = MSD(rds->phys_addr);
+ rds_mbx.reg_ring_sz = rds->dma_size;
+ rds_mbx.reg_ring_len = rds->num_desc;
+ /* Jumbo ring */
+ rds = &recv_ctx->rds_rings[1];
+ rds->producer = 0;
+ rds_mbx.phy_addr_jmb_low = LSD(rds->phys_addr);
+ rds_mbx.phy_addr_jmb_high = MSD(rds->phys_addr);
+ rds_mbx.jmb_ring_sz = rds->dma_size;
+ rds_mbx.jmb_ring_len = rds->num_desc;
+ buf = &cmd.req.arg[index];
+ memcpy(buf, &rds_mbx, rds_mbx_size);
+
+ /* send the mailbox command */
+ err = ahw->hw_ops->mbx_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to create Rx ctx in firmware%d\n", err);
+ goto out;
+ }
+ mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd.rsp.arg[1];
+ recv_ctx->context_id = mbx_out->ctx_id;
+ recv_ctx->state = mbx_out->state;
+ recv_ctx->virt_port = mbx_out->vport_id;
+ dev_info(&adapter->pdev->dev, "Rx Context[%d] Created, state:0x%x\n",
+ recv_ctx->context_id, recv_ctx->state);
+ /* Receive descriptor ring */
+ /* Standard ring */
+ rds = &recv_ctx->rds_rings[0];
+ rds->crb_rcv_producer = ahw->pci_base0 +
+ mbx_out->host_prod[0].reg_buf;
+ /* Jumbo ring */
+ rds = &recv_ctx->rds_rings[1];
+ rds->crb_rcv_producer = ahw->pci_base0 +
+ mbx_out->host_prod[0].jmb_buf;
+ /* status descriptor ring */
+ for (i = 0; i < num_sds; i++) {
+ sds = &recv_ctx->sds_rings[i];
+ sds->crb_sts_consumer = ahw->pci_base0 +
+ mbx_out->host_csmr[i];
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intr_mask = ahw->intr_tbl[i].src;
+ else
+ intr_mask = QLCRDX(ahw, QLCNIC_DEF_INT_MASK);
+ sds->crb_intr_mask = ahw->pci_base0 + intr_mask;
+ }
+
+ if (adapter->drv_sds_rings > QLCNIC_MAX_SDS_RINGS)
+ err = qlcnic_83xx_add_rings(adapter);
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+void qlcnic_83xx_del_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ struct qlcnic_cmd_args cmd;
+ u32 temp = 0;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX))
+ return;
+
+ if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
+ cmd.req.arg[0] |= (0x3 << 29);
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_pf_set_interface_id_del_tx_ctx(adapter, &temp);
+
+ cmd.req.arg[1] = tx_ring->ctx_id | temp;
+ if (qlcnic_issue_cmd(adapter, &cmd))
+ dev_err(&adapter->pdev->dev,
+ "Failed to destroy tx ctx in firmware\n");
+ qlcnic_free_mbx_args(&cmd);
+}
+
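+/* Create a Tx context in firmware: reset the host ring indices, describe the
+ * ring and consumer-index DMA addresses and interrupt id in the mailbox, and
+ * on success record the producer register, context id and interrupt mask.
+ */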
+int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx, int ring)
+{
+ int err;
+ u16 msix_id;
+ u32 *buf, intr_mask, temp = 0;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_tx_mbx mbx;
+ struct qlcnic_tx_mbx_out *mbx_out;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 msix_vector;
+
+ /* Reset host resources */
+ tx->producer = 0;
+ tx->sw_consumer = 0;
+ *(tx->hw_consumer) = 0;
+
+ memset(&mbx, 0, sizeof(struct qlcnic_tx_mbx));
+
+ /* setup mailbox inbox registers */
+ mbx.phys_addr_low = LSD(tx->phys_addr);
+ mbx.phys_addr_high = MSD(tx->phys_addr);
+ mbx.cnsmr_index_low = LSD(tx->hw_cons_phys_addr);
+ mbx.cnsmr_index_high = MSD(tx->hw_cons_phys_addr);
+ mbx.size = tx->num_desc;
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
+ msix_vector = adapter->drv_sds_rings + ring;
+ else
+ msix_vector = adapter->drv_sds_rings - 1;
+ msix_id = ahw->intr_tbl[msix_vector].id;
+ } else {
+ msix_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+ }
+
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ mbx.intr_id = msix_id;
+ else
+ mbx.intr_id = 0xffff;
+ mbx.src = 0;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
+ if (err)
+ return err;
+
+ if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
+ cmd.req.arg[0] |= (0x3 << 29);
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_pf_set_interface_id_create_tx_ctx(adapter, &temp);
+
+ cmd.req.arg[1] = QLCNIC_CAP0_LEGACY_CONTEXT;
+ cmd.req.arg[5] = QLCNIC_SINGLE_RING | temp;
+
+ buf = &cmd.req.arg[6];
+ memcpy(buf, &mbx, sizeof(struct qlcnic_tx_mbx));
+ /* send the mailbox command */
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ netdev_err(adapter->netdev,
+ "Failed to create Tx ctx in firmware 0x%x\n", err);
+ goto out;
+ }
+ mbx_out = (struct qlcnic_tx_mbx_out *)&cmd.rsp.arg[2];
+ tx->crb_cmd_producer = ahw->pci_base0 + mbx_out->host_prod;
+ tx->ctx_id = mbx_out->ctx_id;
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+ intr_mask = ahw->intr_tbl[adapter->drv_sds_rings + ring].src;
+ tx->crb_intr_mask = ahw->pci_base0 + intr_mask;
+ }
+ netdev_info(adapter->netdev,
+ "Tx Context[0x%x] Created, state:0x%x\n",
+ tx->ctx_id, mbx_out->state);
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
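+/* Switch the adapter into diagnostic mode: bring the interface down, detach,
+ * and re-attach with a single SDS ring; post Rx buffers and either enable SDS
+ * interrupts (interrupt test) or arm link events (loopback test).
+ */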
+static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
+ u8 num_sds_ring)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_rds_ring *rds_ring;
+ u16 adapter_state = adapter->is_up;
+ u8 ring;
+ int ret;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ qlcnic_detach(adapter);
+
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
+ adapter->ahw->diag_test = test;
+ adapter->ahw->linkup = 0;
+
+ ret = qlcnic_attach(adapter);
+ if (ret) {
+ netif_device_attach(netdev);
+ return ret;
+ }
+
+ ret = qlcnic_fw_create_ctx(adapter);
+ if (ret) {
+ qlcnic_detach(adapter);
+ if (adapter_state == QLCNIC_ADAPTER_UP_MAGIC) {
+ adapter->drv_sds_rings = num_sds_ring;
+ qlcnic_attach(adapter);
+ }
+ netif_device_attach(netdev);
+ return ret;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx->rds_rings[ring];
+ qlcnic_post_rx_buffers(adapter, rds_ring, ring);
+ }
+
+ if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &adapter->recv_ctx->sds_rings[ring];
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ }
+ }
+
+ if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
+ adapter->ahw->loopback_state = 0;
+ adapter->ahw->hw_ops->setup_link_event(adapter, 1);
+ }
+
+ set_bit(__QLCNIC_DEV_UP, &adapter->state);
+ return 0;
+}
+
+static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
+ u8 drv_sds_rings)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_sds_ring *sds_ring;
+ int ring;
+
+ clear_bit(__QLCNIC_DEV_UP, &adapter->state);
+ if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &adapter->recv_ctx->sds_rings[ring];
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ qlcnic_disable_sds_intr(adapter, sds_ring);
+ }
+ }
+
+ qlcnic_fw_destroy_ctx(adapter);
+ qlcnic_detach(adapter);
+
+ adapter->ahw->diag_test = 0;
+ adapter->drv_sds_rings = drv_sds_rings;
+
+ if (qlcnic_attach(adapter))
+ goto out;
+
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+
+out:
+ netif_device_attach(netdev);
+}
+
+static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_cmd_args cmd;
+ u8 beacon_state;
+ int err = 0;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LED_CONFIG);
+ if (!err) {
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (!err) {
+ beacon_state = cmd.rsp.arg[4];
+ if (beacon_state == QLCNIC_BEACON_DISABLE)
+ ahw->beacon_state = QLC_83XX_BEACON_OFF;
+ else if (beacon_state == QLC_83XX_ENABLE_BEACON)
+ ahw->beacon_state = QLC_83XX_BEACON_ON;
+ }
+ } else {
+ netdev_err(adapter->netdev, "Get beacon state failed, err=%d\n",
+ err);
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+}
+
+int qlcnic_83xx_config_led(struct qlcnic_adapter *adapter, u32 state,
+ u32 beacon)
+{
+ struct qlcnic_cmd_args cmd;
+ u32 mbx_in;
+ int i, status = 0;
+
+ if (state) {
+ /* Get LED configuration */
+ status = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_GET_LED_CONFIG);
+ if (status)
+ return status;
+
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Get led config failed.\n");
+ goto mbx_err;
+ } else {
+ for (i = 0; i < 4; i++)
+ adapter->ahw->mbox_reg[i] = cmd.rsp.arg[i+1];
+ }
+ qlcnic_free_mbx_args(&cmd);
+ /* Set LED Configuration */
+ mbx_in = (LSW(QLC_83XX_LED_CONFIG) << 16) |
+ LSW(QLC_83XX_LED_CONFIG);
+ status = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_SET_LED_CONFIG);
+ if (status)
+ return status;
+
+ cmd.req.arg[1] = mbx_in;
+ cmd.req.arg[2] = mbx_in;
+ cmd.req.arg[3] = mbx_in;
+ if (beacon)
+ cmd.req.arg[4] = QLC_83XX_ENABLE_BEACON;
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Set led config failed.\n");
+ }
+mbx_err:
+ qlcnic_free_mbx_args(&cmd);
+ return status;
+
+ } else {
+ /* Restoring default LED configuration */
+ status = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_SET_LED_CONFIG);
+ if (status)
+ return status;
+
+ cmd.req.arg[1] = adapter->ahw->mbox_reg[0];
+ cmd.req.arg[2] = adapter->ahw->mbox_reg[1];
+ cmd.req.arg[3] = adapter->ahw->mbox_reg[2];
+ if (beacon)
+ cmd.req.arg[4] = adapter->ahw->mbox_reg[3];
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status)
+ dev_err(&adapter->pdev->dev,
+ "Restoring led config failed.\n");
+ qlcnic_free_mbx_args(&cmd);
+ return status;
+ }
+}
+
+int qlcnic_83xx_set_led(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err = -EIO, active = 1;
+
+ if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ netdev_warn(netdev,
+ "LED test is not supported in non-privileged mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state))
+ return -EBUSY;
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ break;
+
+ err = qlcnic_83xx_config_led(adapter, active, 0);
+ if (err)
+ netdev_err(netdev, "Failed to set LED blink state\n");
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ active = 0;
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ break;
+
+ err = qlcnic_83xx_config_led(adapter, active, 0);
+ if (err)
+ netdev_err(netdev, "Failed to reset LED blink state\n");
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (!active || err)
+ clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+
+ return err;
+}
+
+void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *adapter, int enable)
+{
+ struct qlcnic_cmd_args cmd;
+ int status;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ return;
+
+ if (enable)
+ status = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_INIT_NIC_FUNC);
+ else
+ status = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_STOP_NIC_FUNC);
+
+ if (status)
+ return;
+
+ cmd.req.arg[1] = QLC_REGISTER_LB_IDC | QLC_INIT_FW_RESOURCES;
+
+ if (adapter->dcb)
+ cmd.req.arg[1] |= QLC_REGISTER_DCB_AEN;
+
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status)
+ dev_err(&adapter->pdev->dev,
+ "Failed to %s in NIC IDC function event.\n",
+ (enable ? "register" : "unregister"));
+
+ qlcnic_free_mbx_args(&cmd);
+}
+
+static int qlcnic_83xx_set_port_config(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORT_CONFIG);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = adapter->ahw->port_config;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev, "Set Port Config failed.\n");
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_83xx_get_port_config(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PORT_CONFIG);
+ if (err)
+ return err;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev, "Get Port config failed\n");
+ else
+ adapter->ahw->port_config = cmd.rsp.arg[1];
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *adapter, int enable)
+{
+ int err;
+ u32 temp;
+ struct qlcnic_cmd_args cmd;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_EVENT);
+ if (err)
+ return err;
+
+ temp = adapter->recv_ctx->context_id << 16;
+ cmd.req.arg[1] = (enable ? 1 : 0) | BIT_8 | temp;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev,
+ "Setup linkevent mailbox failed\n");
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static void qlcnic_83xx_set_interface_id_promisc(struct qlcnic_adapter *adapter,
+ u32 *interface_id)
+{
+ if (qlcnic_sriov_pf_check(adapter)) {
+ qlcnic_alloc_lb_filters_mem(adapter);
+ qlcnic_pf_set_interface_id_promisc(adapter, interface_id);
+ adapter->rx_mac_learn = true;
+ } else {
+ if (!qlcnic_sriov_vf_check(adapter))
+ *interface_id = adapter->recv_ctx->context_id << 16;
+ }
+}
+
+int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
+{
+ struct qlcnic_cmd_args *cmd = NULL;
+ u32 temp = 0;
+ int err;
+
+ if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+ return -EIO;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!cmd)
+ return -ENOMEM;
+
+ err = qlcnic_alloc_mbx_args(cmd, adapter,
+ QLCNIC_CMD_CONFIGURE_MAC_RX_MODE);
+ if (err)
+ goto out;
+
+ cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
+ qlcnic_83xx_set_interface_id_promisc(adapter, &temp);
+
+ if (qlcnic_84xx_check(adapter) && qlcnic_sriov_pf_check(adapter))
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+
+ cmd->req.arg[1] = mode | temp;
+ err = qlcnic_issue_cmd(adapter, cmd);
+ if (!err)
+ return err;
+
+ qlcnic_free_mbx_args(cmd);
+
+out:
+ kfree(cmd);
+ return err;
+}
+
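+/* Run an internal or external loopback test: allocate diagnostic resources,
+ * switch the port into loopback, wait for the link-up event, run the traffic
+ * test, then clear loopback and restore the previous ring configuration.
+ */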
+int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u8 drv_sds_rings = adapter->drv_sds_rings;
+ u8 drv_tx_rings = adapter->drv_tx_rings;
+ int ret = 0, loop = 0;
+
+ if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ netdev_warn(netdev,
+ "Loopback test not supported in non privileged mode\n");
+ return -ENOTSUPP;
+ }
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+ netdev_info(netdev, "Device is resetting\n");
+ return -EBUSY;
+ }
+
+ if (qlcnic_get_diag_lock(adapter)) {
+ netdev_info(netdev, "Device is in diagnostics mode\n");
+ return -EBUSY;
+ }
+
+ netdev_info(netdev, "%s loopback test in progress\n",
+ mode == QLCNIC_ILB_MODE ? "internal" : "external");
+
+ ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST,
+ drv_sds_rings);
+ if (ret)
+ goto fail_diag_alloc;
+
+ ret = qlcnic_83xx_set_lb_mode(adapter, mode);
+ if (ret)
+ goto free_diag_res;
+
+ /* Poll for link up event before running traffic */
+ do {
+ msleep(QLC_83XX_LB_MSLEEP_COUNT);
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+ netdev_info(netdev,
+ "Device is resetting, free LB test resources\n");
+ ret = -EBUSY;
+ goto free_diag_res;
+ }
+ if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
+ netdev_info(netdev,
+ "Firmware didn't sent link up event to loopback request\n");
+ ret = -ETIMEDOUT;
+ qlcnic_83xx_clear_lb_mode(adapter, mode);
+ goto free_diag_res;
+ }
+ } while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
+
+ ret = qlcnic_do_lb_test(adapter, mode);
+
+ qlcnic_83xx_clear_lb_mode(adapter, mode);
+
+free_diag_res:
+ qlcnic_83xx_diag_free_res(netdev, drv_sds_rings);
+
+fail_diag_alloc:
+ adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
+ qlcnic_release_diag_lock(adapter);
+ return ret;
+}
+
+static void qlcnic_extend_lb_idc_cmpltn_wait(struct qlcnic_adapter *adapter,
+ u32 *max_wait_count)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int temp;
+
+ netdev_info(adapter->netdev, "Received loopback IDC time extend event for 0x%x seconds\n",
+ ahw->extend_lb_time);
+ temp = ahw->extend_lb_time * 1000;
+ *max_wait_count += temp / QLC_83XX_LB_MSLEEP_COUNT;
+ ahw->extend_lb_time = 0;
+}
+
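+/* Put the port into HSS (internal) or external loopback via the port config
+ * mailbox and wait for the IDC completion AEN, extending the wait if the
+ * firmware sends a time-extend event; finally re-add the unicast MAC filter.
+ */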
+static int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct net_device *netdev = adapter->netdev;
+ u32 config, max_wait_count;
+ int status = 0, loop = 0;
+
+ ahw->extend_lb_time = 0;
+ max_wait_count = QLC_83XX_LB_WAIT_COUNT;
+ status = qlcnic_83xx_get_port_config(adapter);
+ if (status)
+ return status;
+
+ config = ahw->port_config;
+
+ /* Check if port is already in loopback mode */
+ if ((config & QLC_83XX_CFG_LOOPBACK_HSS) ||
+ (config & QLC_83XX_CFG_LOOPBACK_EXT)) {
+ netdev_err(netdev,
+ "Port already in Loopback mode.\n");
+ return -EINPROGRESS;
+ }
+
+ set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+
+ if (mode == QLCNIC_ILB_MODE)
+ ahw->port_config |= QLC_83XX_CFG_LOOPBACK_HSS;
+ if (mode == QLCNIC_ELB_MODE)
+ ahw->port_config |= QLC_83XX_CFG_LOOPBACK_EXT;
+
+ status = qlcnic_83xx_set_port_config(adapter);
+ if (status) {
+ netdev_err(netdev,
+ "Failed to Set Loopback Mode = 0x%x.\n",
+ ahw->port_config);
+ ahw->port_config = config;
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ return status;
+ }
+
+ /* Wait for Link and IDC Completion AEN */
+ do {
+ msleep(QLC_83XX_LB_MSLEEP_COUNT);
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+ netdev_info(netdev,
+ "Device is resetting, free LB test resources\n");
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ return -EBUSY;
+ }
+
+ if (ahw->extend_lb_time)
+ qlcnic_extend_lb_idc_cmpltn_wait(adapter,
+ &max_wait_count);
+
+ if (loop++ > max_wait_count) {
+ netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n",
+ __func__);
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ qlcnic_83xx_clear_lb_mode(adapter, mode);
+ return -ETIMEDOUT;
+ }
+ } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
+
+ qlcnic_sre_macaddr_change(adapter, adapter->mac_addr, 0,
+ QLCNIC_MAC_ADD);
+ return status;
+}
+
+static int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 config = ahw->port_config, max_wait_count;
+ struct net_device *netdev = adapter->netdev;
+ int status = 0, loop = 0;
+
+ ahw->extend_lb_time = 0;
+ max_wait_count = QLC_83XX_LB_WAIT_COUNT;
+ set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ if (mode == QLCNIC_ILB_MODE)
+ ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_HSS;
+ if (mode == QLCNIC_ELB_MODE)
+ ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_EXT;
+
+ status = qlcnic_83xx_set_port_config(adapter);
+ if (status) {
+ netdev_err(netdev,
+ "Failed to Clear Loopback Mode = 0x%x.\n",
+ ahw->port_config);
+ ahw->port_config = config;
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ return status;
+ }
+
+ /* Wait for Link and IDC Completion AEN */
+ do {
+ msleep(QLC_83XX_LB_MSLEEP_COUNT);
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+ netdev_info(netdev,
+ "Device is resetting, free LB test resources\n");
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ return -EBUSY;
+ }
+
+ if (ahw->extend_lb_time)
+ qlcnic_extend_lb_idc_cmpltn_wait(adapter,
+ &max_wait_count);
+
+ if (loop++ > max_wait_count) {
+ netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n",
+ __func__);
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ return -ETIMEDOUT;
+ }
+ } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
+
+ qlcnic_sre_macaddr_change(adapter, adapter->mac_addr, 0,
+ QLCNIC_MAC_DEL);
+ return status;
+}
+
+static void qlcnic_83xx_set_interface_id_ipaddr(struct qlcnic_adapter *adapter,
+ u32 *interface_id)
+{
+ if (qlcnic_sriov_pf_check(adapter)) {
+ qlcnic_pf_set_interface_id_ipaddr(adapter, interface_id);
+ } else {
+ if (!qlcnic_sriov_vf_check(adapter))
+ *interface_id = adapter->recv_ctx->context_id << 16;
+ }
+}
+
+void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip,
+ int mode)
+{
+ int err;
+ u32 temp = 0, temp_ip;
+ struct qlcnic_cmd_args cmd;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_CONFIGURE_IP_ADDR);
+ if (err)
+ return;
+
+ qlcnic_83xx_set_interface_id_ipaddr(adapter, &temp);
+
+ if (mode == QLCNIC_IP_UP)
+ cmd.req.arg[1] = 1 | temp;
+ else
+ cmd.req.arg[1] = 2 | temp;
+
+ /*
+ * The adapter expects the IP address in network byte order, but the
+ * mailbox registers are written through writel(), which byte-swaps the
+ * value on big endian architectures. Pre-swap with swab32() so the
+ * adapter still receives the address in network byte order.
+ */
+
+ temp_ip = swab32(ntohl(ip));
+ memcpy(&cmd.req.arg[2], &temp_ip, sizeof(u32));
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS)
+ dev_err(&adapter->netdev->dev,
+ "could not notify %s IP 0x%x request\n",
+ (mode == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
+
+ qlcnic_free_mbx_args(&cmd);
+}
+
+int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *adapter, int mode)
+{
+ int err;
+ u32 temp, arg1;
+ struct qlcnic_cmd_args cmd;
+ int lro_bit_mask;
+
+ lro_bit_mask = (mode ? (BIT_0 | BIT_1 | BIT_2 | BIT_3) : 0);
+
+ if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+ return 0;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_HW_LRO);
+ if (err)
+ return err;
+
+ temp = adapter->recv_ctx->context_id << 16;
+ arg1 = lro_bit_mask | temp;
+ cmd.req.arg[1] = arg1;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev, "LRO config failed\n");
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+int qlcnic_83xx_config_rss(struct qlcnic_adapter *adapter, int enable)
+{
+ int err;
+ u32 word;
+ struct qlcnic_cmd_args cmd;
+ const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+ 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+ 0x255b0ec26d5a56daULL };
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_RSS);
+ if (err)
+ return err;
+ /*
+ * RSS request:
+ * bits 3-0: Rsvd
+ * 5-4: hash_type_ipv4
+ * 7-6: hash_type_ipv6
+ * 8: enable
+ * 9: use indirection table
+ * 16-31: indirection table mask
+ */
+ word = ((u32)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
+ ((u32)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
+ ((u32)(enable & 0x1) << 8) |
+ ((0x7ULL) << 16);
+ cmd.req.arg[1] = (adapter->recv_ctx->context_id);
+ cmd.req.arg[2] = word;
+ memcpy(&cmd.req.arg[4], key, sizeof(key));
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err)
+ dev_info(&adapter->pdev->dev, "RSS config failed\n");
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+static void qlcnic_83xx_set_interface_id_macaddr(struct qlcnic_adapter *adapter,
+ u32 *interface_id)
+{
+ if (qlcnic_sriov_pf_check(adapter)) {
+ qlcnic_pf_set_interface_id_macaddr(adapter, interface_id);
+ } else {
+ if (!qlcnic_sriov_vf_check(adapter))
+ *interface_id = adapter->recv_ctx->context_id << 16;
+ }
+}
+
+int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
+ u16 vlan_id, u8 op)
+{
+ struct qlcnic_cmd_args *cmd = NULL;
+ struct qlcnic_macvlan_mbx mv;
+ u32 *buf, temp = 0;
+ int err;
+
+ if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+ return -EIO;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!cmd)
+ return -ENOMEM;
+
+ err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
+ if (err)
+ goto out;
+
+ cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
+
+ if (vlan_id)
+ op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
+ QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
+
+ cmd->req.arg[1] = op | (1 << 8);
+ qlcnic_83xx_set_interface_id_macaddr(adapter, &temp);
+ cmd->req.arg[1] |= temp;
+ mv.vlan = vlan_id;
+ mv.mac_addr0 = addr[0];
+ mv.mac_addr1 = addr[1];
+ mv.mac_addr2 = addr[2];
+ mv.mac_addr3 = addr[3];
+ mv.mac_addr4 = addr[4];
+ mv.mac_addr5 = addr[5];
+ buf = &cmd->req.arg[2];
+ memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
+ err = qlcnic_issue_cmd(adapter, cmd);
+ if (!err)
+ return err;
+
+ qlcnic_free_mbx_args(cmd);
+out:
+ kfree(cmd);
+ return err;
+}
+
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
+ u16 vlan_id)
+{
+ u8 mac[ETH_ALEN];
+ memcpy(&mac, addr, ETH_ALEN);
+ qlcnic_83xx_sre_macaddr_change(adapter, mac, vlan_id, QLCNIC_MAC_ADD);
+}
+
+static void qlcnic_83xx_configure_mac(struct qlcnic_adapter *adapter, u8 *mac,
+ u8 type, struct qlcnic_cmd_args *cmd)
+{
+ switch (type) {
+ case QLCNIC_SET_STATION_MAC:
+ case QLCNIC_SET_FAC_DEF_MAC:
+ memcpy(&cmd->req.arg[2], mac, sizeof(u32));
+ memcpy(&cmd->req.arg[3], &mac[4], sizeof(u16));
+ break;
+ }
+ cmd->req.arg[1] = type;
+}
+
+int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
+ u8 function)
+{
+ int err, i;
+ struct qlcnic_cmd_args cmd;
+ u32 mac_low, mac_high;
+
+ function = 0;
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
+ if (err)
+ return err;
+
+ qlcnic_83xx_configure_mac(adapter, mac, QLCNIC_GET_CURRENT_MAC, &cmd);
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ mac_low = cmd.rsp.arg[1];
+ mac_high = cmd.rsp.arg[2];
+
+ for (i = 0; i < 2; i++)
+ mac[i] = (u8) (mac_high >> ((1 - i) * 8));
+ for (i = 2; i < 6; i++)
+ mac[i] = (u8) (mac_low >> ((5 - i) * 8));
+ } else {
+ dev_err(&adapter->pdev->dev, "Failed to get mac address%d\n",
+ err);
+ err = -EIO;
+ }
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_83xx_set_rx_intr_coal(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+ struct qlcnic_cmd_args cmd;
+ u16 temp;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL);
+ if (err)
+ return err;
+
+ temp = adapter->recv_ctx->context_id;
+ cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_RX | temp << 16;
+ temp = coal->rx_time_us;
+ cmd.req.arg[2] = coal->rx_packets | temp << 16;
+ cmd.req.arg[3] = coal->flag;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS)
+ netdev_err(adapter->netdev,
+ "failed to set interrupt coalescing parameters\n");
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+static int qlcnic_83xx_set_tx_intr_coal(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+ struct qlcnic_cmd_args cmd;
+ u16 temp;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL);
+ if (err)
+ return err;
+
+ temp = adapter->tx_ring->ctx_id;
+ cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_TX | temp << 16;
+ temp = coal->tx_time_us;
+ cmd.req.arg[2] = coal->tx_packets | temp << 16;
+ cmd.req.arg[3] = coal->flag;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS)
+ netdev_err(adapter->netdev,
+ "failed to set interrupt coalescing parameters\n");
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+int qlcnic_83xx_set_rx_tx_intr_coal(struct qlcnic_adapter *adapter)
+{
+ int err = 0;
+
+ err = qlcnic_83xx_set_rx_intr_coal(adapter);
+ if (err)
+ netdev_err(adapter->netdev,
+ "failed to set Rx coalescing parameters\n");
+
+ err = qlcnic_83xx_set_tx_intr_coal(adapter);
+ if (err)
+ netdev_err(adapter->netdev,
+ "failed to set Tx coalescing parameters\n");
+
+ return err;
+}
+
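+/* Program interrupt coalescing from an ethtool request: compare the requested
+ * Rx/Tx values against the current settings to decide whether only Rx, only
+ * Tx, or both contexts need to be updated in firmware.
+ */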
+int qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+ u32 rx_coalesce_usecs, rx_max_frames;
+ u32 tx_coalesce_usecs, tx_max_frames;
+ int err;
+
+ if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+ return -EIO;
+
+ tx_coalesce_usecs = ethcoal->tx_coalesce_usecs;
+ tx_max_frames = ethcoal->tx_max_coalesced_frames;
+ rx_coalesce_usecs = ethcoal->rx_coalesce_usecs;
+ rx_max_frames = ethcoal->rx_max_coalesced_frames;
+ coal->flag = QLCNIC_INTR_DEFAULT;
+
+ if ((coal->rx_time_us == rx_coalesce_usecs) &&
+ (coal->rx_packets == rx_max_frames)) {
+ coal->type = QLCNIC_INTR_COAL_TYPE_TX;
+ coal->tx_time_us = tx_coalesce_usecs;
+ coal->tx_packets = tx_max_frames;
+ } else if ((coal->tx_time_us == tx_coalesce_usecs) &&
+ (coal->tx_packets == tx_max_frames)) {
+ coal->type = QLCNIC_INTR_COAL_TYPE_RX;
+ coal->rx_time_us = rx_coalesce_usecs;
+ coal->rx_packets = rx_max_frames;
+ } else {
+ coal->type = QLCNIC_INTR_COAL_TYPE_RX_TX;
+ coal->rx_time_us = rx_coalesce_usecs;
+ coal->rx_packets = rx_max_frames;
+ coal->tx_time_us = tx_coalesce_usecs;
+ coal->tx_packets = tx_max_frames;
+ }
+
+ switch (coal->type) {
+ case QLCNIC_INTR_COAL_TYPE_RX:
+ err = qlcnic_83xx_set_rx_intr_coal(adapter);
+ break;
+ case QLCNIC_INTR_COAL_TYPE_TX:
+ err = qlcnic_83xx_set_tx_intr_coal(adapter);
+ break;
+ case QLCNIC_INTR_COAL_TYPE_RX_TX:
+ err = qlcnic_83xx_set_rx_tx_intr_coal(adapter);
+ break;
+ default:
+ err = -EINVAL;
+ netdev_err(adapter->netdev,
+ "Invalid Interrupt coalescing type\n");
+ break;
+ }
+
+ return err;
+}
+
+static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
+ u32 data[])
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u8 link_status, duplex;
+ /* link status */
+ link_status = LSB(data[3]) & 1;
+ if (link_status) {
+ ahw->link_speed = MSW(data[2]);
+ duplex = LSB(MSW(data[3]));
+ if (duplex)
+ ahw->link_duplex = DUPLEX_FULL;
+ else
+ ahw->link_duplex = DUPLEX_HALF;
+ } else {
+ ahw->link_speed = SPEED_UNKNOWN;
+ ahw->link_duplex = DUPLEX_UNKNOWN;
+ }
+
+ ahw->link_autoneg = MSB(MSW(data[3]));
+ ahw->module_type = MSB(LSW(data[3]));
+ ahw->has_link_events = 1;
+ ahw->lb_mode = data[4] & QLCNIC_LB_MODE_MASK;
+ qlcnic_advert_link_change(adapter, link_status);
+}
+
+static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
+{
+ struct qlcnic_adapter *adapter = data;
+ struct qlcnic_mailbox *mbx;
+ u32 mask, resp, event;
+ unsigned long flags;
+
+ mbx = adapter->ahw->mailbox;
+ spin_lock_irqsave(&mbx->aen_lock, flags);
+ resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
+ if (!(resp & QLCNIC_SET_OWNER))
+ goto out;
+
+ event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
+ if (event & QLCNIC_MBX_ASYNC_EVENT)
+ __qlcnic_83xx_process_aen(adapter);
+ else
+ qlcnic_83xx_notify_mbx_response(mbx);
+
+out:
+ mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
+ writel(0, adapter->ahw->pci_base0 + mask);
+ spin_unlock_irqrestore(&mbx->aen_lock, flags);
+ return IRQ_HANDLED;
+}
+
+int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *nic)
+{
+ int i, err = -EIO;
+ struct qlcnic_cmd_args cmd;
+
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Error, invoked by non management func\n",
+ __func__);
+ return err;
+ }
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = (nic->pci_func << 16);
+ cmd.req.arg[2] = 0x1 << 16;
+ cmd.req.arg[3] = nic->phys_port | (nic->switch_mode << 16);
+ cmd.req.arg[4] = nic->capabilities;
+ cmd.req.arg[5] = (nic->max_mac_filters & 0xFF) | ((nic->max_mtu) << 16);
+ cmd.req.arg[6] = (nic->max_tx_ques) | ((nic->max_rx_ques) << 16);
+ cmd.req.arg[7] = (nic->min_tx_bw) | ((nic->max_tx_bw) << 16);
+ for (i = 8; i < 32; i++)
+ cmd.req.arg[i] = 0;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev, "Failed to set nic info%d\n",
+ err);
+ err = -EIO;
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info, u8 func_id)
+{
+ int err;
+ u32 temp;
+ u8 op = 0;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
+ if (err)
+ return err;
+
+ if (func_id != ahw->pci_func) {
+ temp = func_id << 16;
+ cmd.req.arg[1] = op | BIT_31 | temp;
+ } else {
+ cmd.req.arg[1] = ahw->pci_func << 16;
+ }
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_info(&adapter->pdev->dev,
+ "Failed to get nic info %d\n", err);
+ goto out;
+ }
+
+ npar_info->op_type = cmd.rsp.arg[1];
+ npar_info->pci_func = cmd.rsp.arg[2] & 0xFFFF;
+ npar_info->op_mode = (cmd.rsp.arg[2] & 0xFFFF0000) >> 16;
+ npar_info->phys_port = cmd.rsp.arg[3] & 0xFFFF;
+ npar_info->switch_mode = (cmd.rsp.arg[3] & 0xFFFF0000) >> 16;
+ npar_info->capabilities = cmd.rsp.arg[4];
+ npar_info->max_mac_filters = cmd.rsp.arg[5] & 0xFF;
+ npar_info->max_mtu = (cmd.rsp.arg[5] & 0xFFFF0000) >> 16;
+ npar_info->max_tx_ques = cmd.rsp.arg[6] & 0xFFFF;
+ npar_info->max_rx_ques = (cmd.rsp.arg[6] & 0xFFFF0000) >> 16;
+ npar_info->min_tx_bw = cmd.rsp.arg[7] & 0xFFFF;
+ npar_info->max_tx_bw = (cmd.rsp.arg[7] & 0xFFFF0000) >> 16;
+ if (cmd.rsp.arg[8] & 0x1)
+ npar_info->max_bw_reg_offset = (cmd.rsp.arg[8] & 0x7FFE) >> 1;
+ if (cmd.rsp.arg[8] & 0x10000) {
+ temp = (cmd.rsp.arg[8] & 0x7FFE0000) >> 17;
+ npar_info->max_linkspeed_reg_offset = temp;
+ }
+
+ memcpy(ahw->extra_capability, &cmd.rsp.arg[16],
+ sizeof(ahw->extra_capability));
+
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+int qlcnic_get_pci_func_type(struct qlcnic_adapter *adapter, u16 type,
+ u16 *nic, u16 *fcoe, u16 *iscsi)
+{
+ struct device *dev = &adapter->pdev->dev;
+ int err = 0;
+
+ switch (type) {
+ case QLCNIC_TYPE_NIC:
+ (*nic)++;
+ break;
+ case QLCNIC_TYPE_FCOE:
+ (*fcoe)++;
+ break;
+ case QLCNIC_TYPE_ISCSI:
+ (*iscsi)++;
+ break;
+ default:
+ dev_err(dev, "%s: Unknown PCI type[%x]\n",
+ __func__, type);
+ err = -EIO;
+ }
+
+ return err;
+}
+
+int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_pci_info *pci_info)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct device *dev = &adapter->pdev->dev;
+ u16 nic = 0, fcoe = 0, iscsi = 0;
+ struct qlcnic_cmd_args cmd;
+ int i, err = 0, j = 0;
+ u32 temp;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
+ if (err)
+ return err;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ ahw->total_nic_func = 0;
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ ahw->max_pci_func = cmd.rsp.arg[1] & 0xFF;
+ for (i = 2, j = 0; j < ahw->max_vnic_func; j++, pci_info++) {
+ pci_info->id = cmd.rsp.arg[i] & 0xFFFF;
+ pci_info->active = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
+ i++;
+ if (!pci_info->active) {
+ i += QLC_SKIP_INACTIVE_PCI_REGS;
+ continue;
+ }
+ pci_info->type = cmd.rsp.arg[i] & 0xFFFF;
+ err = qlcnic_get_pci_func_type(adapter, pci_info->type,
+ &nic, &fcoe, &iscsi);
+ temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
+ pci_info->default_port = temp;
+ i++;
+ pci_info->tx_min_bw = cmd.rsp.arg[i] & 0xFFFF;
+ temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
+ pci_info->tx_max_bw = temp;
+ i = i + 2;
+ memcpy(pci_info->mac, &cmd.rsp.arg[i], ETH_ALEN - 2);
+ i++;
+ memcpy(pci_info->mac + sizeof(u32), &cmd.rsp.arg[i], 2);
+ i = i + 3;
+ }
+ } else {
+ dev_err(dev, "Failed to get PCI Info, error = %d\n", err);
+ err = -EIO;
+ }
+
+ ahw->total_nic_func = nic;
+ ahw->total_pci_func = nic + fcoe + iscsi;
+ if (ahw->total_nic_func == 0 || ahw->total_pci_func == 0) {
+ dev_err(dev, "%s: Invalid function count: total nic func[%x], total pci func[%x]\n",
+ __func__, ahw->total_nic_func, ahw->total_pci_func);
+ err = -EIO;
+ }
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *adapter, bool op_type)
+{
+ int i, index, err;
+ u8 max_ints;
+ u32 val, temp, type;
+ struct qlcnic_cmd_args cmd;
+
+ max_ints = adapter->ahw->num_msix - 1;
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTRPT);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = max_ints;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ cmd.req.arg[1] |= (adapter->ahw->pci_func << 8) | BIT_16;
+
+ for (i = 0, index = 2; i < max_ints; i++) {
+ type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL;
+ val = type | (adapter->ahw->intr_tbl[i].type << 4);
+ if (adapter->ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX)
+ val |= (adapter->ahw->intr_tbl[i].id << 16);
+ cmd.req.arg[index++] = val;
+ }
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to configure interrupts 0x%x\n", err);
+ goto out;
+ }
+
+ max_ints = cmd.rsp.arg[1];
+ for (i = 0, index = 2; i < max_ints; i++, index += 2) {
+ val = cmd.rsp.arg[index];
+ if (LSB(val)) {
+ dev_info(&adapter->pdev->dev,
+ "Can't configure interrupt %d\n",
+ adapter->ahw->intr_tbl[i].id);
+ continue;
+ }
+ if (op_type) {
+ adapter->ahw->intr_tbl[i].id = MSW(val);
+ adapter->ahw->intr_tbl[i].enabled = 1;
+ temp = cmd.rsp.arg[index + 1];
+ adapter->ahw->intr_tbl[i].src = temp;
+ } else {
+ adapter->ahw->intr_tbl[i].id = i;
+ adapter->ahw->intr_tbl[i].enabled = 0;
+ adapter->ahw->intr_tbl[i].src = 0;
+ }
+ }
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+int qlcnic_83xx_lock_flash(struct qlcnic_adapter *adapter)
+{
+ int id, timeout = 0;
+ u32 status = 0;
+
+ while (status == 0) {
+ status = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK);
+ if (status)
+ break;
+
+ if (++timeout >= QLC_83XX_FLASH_LOCK_TIMEOUT) {
+ id = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_FLASH_LOCK_OWNER);
+ dev_err(&adapter->pdev->dev,
+ "%s: failed, lock held by %d\n", __func__, id);
+ return -EIO;
+ }
+ usleep_range(1000, 2000);
+ }
+
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER, adapter->portnum);
+ return 0;
+}
+
+void qlcnic_83xx_unlock_flash(struct qlcnic_adapter *adapter)
+{
+ QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER, 0xFF);
+}
+
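+/* Read 'count' dwords from flash through the direct-access window without
+ * taking the flash lock (the caller is expected to hold it); the window
+ * register is re-programmed whenever the read crosses a sector boundary.
+ */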
+int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter,
+ u32 flash_addr, u8 *p_data,
+ int count)
+{
+ u32 word, range, flash_offset, addr = flash_addr, ret;
+ ulong indirect_add, direct_window;
+ int i, err = 0;
+
+ flash_offset = addr & (QLCNIC_FLASH_SECTOR_SIZE - 1);
+ if (addr & 0x3) {
+ dev_err(&adapter->pdev->dev, "Illegal addr = 0x%x\n", addr);
+ return -EIO;
+ }
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_DIRECT_WINDOW,
+ (addr));
+
+ range = flash_offset + (count * sizeof(u32));
+ /* Check if data is spread across multiple sectors */
+ if (range > (QLCNIC_FLASH_SECTOR_SIZE - 1)) {
+ /* Multi sector read */
+ for (i = 0; i < count; i++) {
+ indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr);
+ ret = QLCRD32(adapter, indirect_add, &err);
+ if (err == -EIO)
+ return err;
+
+ word = ret;
+ *(u32 *)p_data = word;
+ p_data = p_data + 4;
+ addr = addr + 4;
+ flash_offset = flash_offset + 4;
+
+ if (flash_offset > (QLCNIC_FLASH_SECTOR_SIZE - 1)) {
+ direct_window = QLC_83XX_FLASH_DIRECT_WINDOW;
+ /* This write is needed once for each sector */
+ qlcnic_83xx_wrt_reg_indirect(adapter,
+ direct_window,
+ (addr));
+ flash_offset = 0;
+ }
+ }
+ } else {
+ /* Single sector read */
+ for (i = 0; i < count; i++) {
+ indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr);
+ ret = QLCRD32(adapter, indirect_add, &err);
+ if (err == -EIO)
+ return err;
+
+ word = ret;
+ *(u32 *)p_data = word;
+ p_data = p_data + 4;
+ addr = addr + 4;
+ }
+ }
+
+ return 0;
+}
+
+static int qlcnic_83xx_poll_flash_status_reg(struct qlcnic_adapter *adapter)
+{
+ u32 status;
+ int retries = QLC_83XX_FLASH_READ_RETRY_COUNT;
+ int err = 0;
+
+ do {
+ status = QLCRD32(adapter, QLC_83XX_FLASH_STATUS, &err);
+ if (err == -EIO)
+ return err;
+
+ if ((status & QLC_83XX_FLASH_STATUS_READY) ==
+ QLC_83XX_FLASH_STATUS_READY)
+ break;
+
+ msleep(QLC_83XX_FLASH_STATUS_REG_POLL_DELAY);
+ } while (--retries);
+
+ if (!retries)
+ return -EIO;
+
+ return 0;
+}
+
+int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *adapter)
+{
+ int ret;
+ u32 cmd;
+ cmd = adapter->ahw->fdt.write_statusreg_cmd;
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ (QLC_83XX_FLASH_FDT_WRITE_DEF_SIG | cmd));
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
+ adapter->ahw->fdt.write_enable_bits);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_SECOND_ERASE_MS_VAL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret)
+ return -EIO;
+
+ return 0;
+}
+
+int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *adapter)
+{
+ int ret;
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ (QLC_83XX_FLASH_FDT_WRITE_DEF_SIG |
+ adapter->ahw->fdt.write_statusreg_cmd));
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
+ adapter->ahw->fdt.write_disable_bits);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_SECOND_ERASE_MS_VAL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret)
+ return -EIO;
+
+ return 0;
+}
+
+int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *adapter)
+{
+ int ret, err = 0;
+ u32 mfg_id;
+
+ if (qlcnic_83xx_lock_flash(adapter))
+ return -EIO;
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_FDT_READ_MFG_ID_VAL);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_READ_CTRL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ mfg_id = QLCRD32(adapter, QLC_83XX_FLASH_RDDATA, &err);
+ if (err == -EIO) {
+ qlcnic_83xx_unlock_flash(adapter);
+ return err;
+ }
+
+ adapter->flash_mfg_id = (mfg_id & 0xFF);
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
+int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *adapter)
+{
+ int count, fdt_size, ret = 0;
+
+ fdt_size = sizeof(struct qlcnic_fdt);
+ count = fdt_size / sizeof(u32);
+
+ if (qlcnic_83xx_lock_flash(adapter))
+ return -EIO;
+
+ memset(&adapter->ahw->fdt, 0, fdt_size);
+ ret = qlcnic_83xx_lockless_flash_read32(adapter, QLCNIC_FDT_LOCATION,
+ (u8 *)&adapter->ahw->fdt,
+ count);
+
+ qlcnic_83xx_unlock_flash(adapter);
+ return ret;
+}
+
+int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *adapter,
+ u32 sector_start_addr)
+{
+ u32 reversed_addr, addr1, addr2, cmd;
+ int ret = -EIO;
+
+ if (qlcnic_83xx_lock_flash(adapter) != 0)
+ return -EIO;
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_enable_flash_write(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ dev_err(&adapter->pdev->dev,
+ "%s failed at %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+ }
+
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
+ addr1 = (sector_start_addr & 0xFF) << 16;
+ addr2 = (sector_start_addr & 0xFF0000) >> 16;
+ reversed_addr = addr1 | addr2;
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
+ reversed_addr);
+ cmd = QLC_83XX_FLASH_FDT_ERASE_DEF_SIG | adapter->ahw->fdt.erase_cmd;
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id)
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, cmd);
+ else
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_OEM_ERASE_SIG);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_LAST_ERASE_MS_VAL);
+
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return ret;
+ }
+ }
+
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
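+/* Program a single dword at the given flash byte address (converted to a
+ * dword offset) and wait for the operation to complete.
+ */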
+int qlcnic_83xx_flash_write32(struct qlcnic_adapter *adapter, u32 addr,
+ u32 *p_data)
+{
+ int ret = -EIO;
+ u32 addr1 = 0x00800000 | (addr >> 2);
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, addr1);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_LAST_ERASE_MS_VAL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
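+/* Burst-write between QLC_83XX_FLASH_WRITE_MIN and QLC_83XX_FLASH_WRITE_MAX
+ * dwords using the SPI multi-word sequence: a first-word pattern, repeated
+ * middle-word patterns and a last-word pattern, polling the flash status
+ * after each step and clearing the SPI error bit if the write failed.
+ */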
+int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr,
+ u32 *p_data, int count)
+{
+ u32 temp;
+ int ret = -EIO, err = 0;
+
+ if ((count < QLC_83XX_FLASH_WRITE_MIN) ||
+ (count > QLC_83XX_FLASH_WRITE_MAX)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Invalid word count\n", __func__);
+ return -EIO;
+ }
+
+ temp = QLCRD32(adapter, QLC_83XX_FLASH_SPI_CONTROL, &err);
+ if (err == -EIO)
+ return err;
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_SPI_CONTROL,
+ (temp | QLC_83XX_FLASH_SPI_CTRL));
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_ADDR_TEMP_VAL);
+
+ /* First DWORD write */
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_FIRST_MS_PATTERN);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
+ count--;
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_ADDR_SECOND_TEMP_VAL);
+ /* Second to N-1 DWORD writes */
+ while (count != 1) {
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
+ *p_data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_SECOND_MS_PATTERN);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+ count--;
+ }
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_ADDR_TEMP_VAL |
+ (addr >> 2));
+ /* Last DWORD write */
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_LAST_MS_PATTERN);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
+ ret = QLCRD32(adapter, QLC_83XX_FLASH_SPI_STATUS, &err);
+ if (err == -EIO)
+ return err;
+
+ if ((ret & QLC_83XX_FLASH_SPI_CTRL) == QLC_83XX_FLASH_SPI_CTRL) {
+ dev_err(&adapter->pdev->dev, "%s: failed at %d\n",
+ __func__, __LINE__);
+ /* Operation failed, clear error bit */
+ temp = QLCRD32(adapter, QLC_83XX_FLASH_SPI_CONTROL, &err);
+ if (err == -EIO)
+ return err;
+
+ qlcnic_83xx_wrt_reg_indirect(adapter,
+ QLC_83XX_FLASH_SPI_CONTROL,
+ (temp | QLC_83XX_FLASH_SPI_CTRL));
+ }
+
+ return 0;
+}
+
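+/* Attempt to recover a stale inter-driver lock: claim the recovery slot,
+ * wait, and if this function still owns the slot force-release the lock and
+ * clear the recovery bits; otherwise leave recovery to the function that
+ * claimed it.
+ */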
+static void qlcnic_83xx_recover_driver_lock(struct qlcnic_adapter *adapter)
+{
+ u32 val, id;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK);
+
+ /* Check whether recovery needs to be performed by this function */
+ if ((val & QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK) == 0) {
+ val = val & ~0x3F;
+ val = val | ((adapter->portnum << 2) |
+ QLC_83XX_NEED_DRV_LOCK_RECOVERY);
+ QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val);
+ dev_info(&adapter->pdev->dev,
+ "%s: lock recovery initiated\n", __func__);
+ msleep(QLC_83XX_DRV_LOCK_RECOVERY_DELAY);
+ val = QLCRDX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK);
+ id = ((val >> 2) & 0xF);
+ if (id == adapter->portnum) {
+ val = val & ~QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK;
+ val = val | QLC_83XX_DRV_LOCK_RECOVERY_IN_PROGRESS;
+ QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val);
+ /* Force release the lock */
+ QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK);
+ /* Clear recovery bits */
+ val = val & ~0x3F;
+ QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val);
+ dev_info(&adapter->pdev->dev,
+ "%s: lock recovery completed\n", __func__);
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "%s: func %d to resume lock recovery process\n",
+ __func__, id);
+ }
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "%s: lock recovery initiated by other functions\n",
+ __func__);
+ }
+}
+
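+/* Acquire the inter-driver (IDC) hardware lock, initiating lock recovery if
+ * the current owner appears stuck; on success the lock-alive counter is
+ * incremented and this function's port number is recorded as the owner.
+ */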
+int qlcnic_83xx_lock_driver(struct qlcnic_adapter *adapter)
+{
+ u32 lock_alive_counter, val, id, i = 0, status = 0, temp = 0;
+ int max_attempt = 0;
+
+ while (status == 0) {
+ status = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK);
+ if (status)
+ break;
+
+ msleep(QLC_83XX_DRV_LOCK_WAIT_DELAY);
+ i++;
+
+ if (i == 1)
+ temp = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+
+ if (i == QLC_83XX_DRV_LOCK_WAIT_COUNTER) {
+ val = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ if (val == temp) {
+ id = val & 0xFF;
+ dev_info(&adapter->pdev->dev,
+ "%s: lock to be recovered from %d\n",
+ __func__, id);
+ qlcnic_83xx_recover_driver_lock(adapter);
+ i = 0;
+ max_attempt++;
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to get lock\n", __func__);
+ return -EIO;
+ }
+ }
+
+ /* Force exit from the while loop after a few attempts */
+ if (max_attempt == QLC_83XX_MAX_DRV_LOCK_RECOVERY_ATTEMPT) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to get lock\n", __func__);
+ return -EIO;
+ }
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ lock_alive_counter = val >> 8;
+ lock_alive_counter++;
+ val = lock_alive_counter << 8 | adapter->portnum;
+ QLCWRX(adapter->ahw, QLC_83XX_DRV_LOCK_ID, val);
+
+ return 0;
+}
+
+void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *adapter)
+{
+ u32 val, lock_alive_counter, id;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ id = val & 0xFF;
+ lock_alive_counter = val >> 8;
+
+ if (id != adapter->portnum)
+ dev_err(&adapter->pdev->dev,
+ "%s:Warning func %d is unlocking lock owned by %d\n",
+ __func__, adapter->portnum, id);
+
+ val = (lock_alive_counter << 8) | 0xFF;
+ QLCWRX(adapter->ahw, QLC_83XX_DRV_LOCK_ID, val);
+ QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK);
+}
+
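+/* Write 'count' 128-bit entries to agent memory through the indirect MS
+ * interface; the destination address must be 16-byte aligned and every
+ * entry must fall within the QDR or DDR address windows.
+ */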
+int qlcnic_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
+ u32 *data, u32 count)
+{
+ int i, j, ret = 0;
+ u32 temp;
+
+ /* Check alignment */
+ if (addr & 0xF)
+ return -EIO;
+
+ mutex_lock(&adapter->ahw->mem_lock);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
+
+ for (i = 0; i < count; i++, addr += 16) {
+ if (!((ADDR_IN_RANGE(addr, QLCNIC_ADDR_QDR_NET,
+ QLCNIC_ADDR_QDR_NET_MAX)) ||
+ (ADDR_IN_RANGE(addr, QLCNIC_ADDR_DDR_NET,
+ QLCNIC_ADDR_DDR_NET_MAX)))) {
+ mutex_unlock(&adapter->ahw->mem_lock);
+ return -EIO;
+ }
+
+ qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_LO, *data++);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_HI, *data++);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_ULO, *data++);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_UHI, *data++);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_ENABLE);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_START);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
+
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ /* Status check failure */
+ if (j >= MAX_CTL_CHECK) {
+ printk_ratelimited(KERN_WARNING
+ "MS memory write failed\n");
+ mutex_unlock(&adapter->ahw->mem_lock);
+ return -EIO;
+ }
+ }
+
+ mutex_unlock(&adapter->ahw->mem_lock);
+
+ return ret;
+}
+
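+/* Read 'count' dwords from flash through the direct-mapped window while
+ * holding the flash lock; the source address must be dword aligned.
+ */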
+int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
+ u8 *p_data, int count)
+{
+ u32 word, addr = flash_addr, ret;
+ ulong indirect_addr;
+ int i, err = 0;
+
+ if (qlcnic_83xx_lock_flash(adapter) != 0)
+ return -EIO;
+
+ if (addr & 0x3) {
+ dev_err(&adapter->pdev->dev, "Illegal addr = 0x%x\n", addr);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (qlcnic_83xx_wrt_reg_indirect(adapter,
+ QLC_83XX_FLASH_DIRECT_WINDOW,
+ (addr))) {
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr);
+ ret = QLCRD32(adapter, indirect_addr, &err);
+ if (err == -EIO)
+ return err;
+
+ word = ret;
+ *(u32 *)p_data = word;
+ p_data = p_data + 4;
+ addr = addr + 4;
+ }
+
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
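+/* Query the firmware for the current link status; caches the link speed and
+ * module type in the hardware context and returns the raw link
+ * configuration word (0 when the link is down).
+ */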
+int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter)
+{
+ u8 pci_func;
+ int err;
+ u32 config = 0, state;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ pci_func = adapter->portnum;
+ else
+ pci_func = ahw->pci_func;
+
+ state = readl(ahw->pci_base0 + QLC_83XX_LINK_STATE(pci_func));
+ if (!QLC_83xx_FUNC_VAL(state, pci_func)) {
+ dev_info(&adapter->pdev->dev, "link state down\n");
+ return config;
+ }
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_STATUS);
+ if (err)
+ return err;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_info(&adapter->pdev->dev,
+ "Get Link Status Command failed: 0x%x\n", err);
+ goto out;
+ } else {
+ config = cmd.rsp.arg[1];
+ switch (QLC_83XX_CURRENT_LINK_SPEED(config)) {
+ case QLC_83XX_10M_LINK:
+ ahw->link_speed = SPEED_10;
+ break;
+ case QLC_83XX_100M_LINK:
+ ahw->link_speed = SPEED_100;
+ break;
+ case QLC_83XX_1G_LINK:
+ ahw->link_speed = SPEED_1000;
+ break;
+ case QLC_83XX_10G_LINK:
+ ahw->link_speed = SPEED_10000;
+ break;
+ default:
+ ahw->link_speed = 0;
+ break;
+ }
+ config = cmd.rsp.arg[3];
+ if (QLC_83XX_SFP_PRESENT(config)) {
+ switch (ahw->module_type) {
+ case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
+ case LINKEVENT_MODULE_OPTICAL_SRLR:
+ case LINKEVENT_MODULE_OPTICAL_LRM:
+ case LINKEVENT_MODULE_OPTICAL_SFP_1G:
+ ahw->supported_type = PORT_FIBRE;
+ break;
+ case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
+ case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
+ case LINKEVENT_MODULE_TWINAX:
+ ahw->supported_type = PORT_TP;
+ break;
+ default:
+ ahw->supported_type = PORT_OTHER;
+ }
+ }
+ if (config & 1)
+ err = 1;
+ }
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return config;
+}
+
+int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter,
+ struct ethtool_cmd *ecmd)
+{
+ u32 config = 0;
+ int status = 0;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (!test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) {
+ /* Get port configuration info */
+ status = qlcnic_83xx_get_port_info(adapter);
+ /* Get Link Status related info */
+ config = qlcnic_83xx_test_link(adapter);
+ ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config);
+ }
+
+ /* Hard-code the board type until there is a way to read it from flash */
+ ahw->board_type = QLCNIC_BRDTYPE_83XX_10G;
+
+ if (netif_running(adapter->netdev) && ahw->has_link_events) {
+ ethtool_cmd_speed_set(ecmd, ahw->link_speed);
+ ecmd->duplex = ahw->link_duplex;
+ ecmd->autoneg = ahw->link_autoneg;
+ } else {
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ }
+
+ if (ahw->port_type == QLCNIC_XGBE) {
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->advertising = ADVERTISED_10000baseT_Full;
+ } else {
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
+ ecmd->advertising = (ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full);
+ }
+
+ switch (ahw->supported_type) {
+ case PORT_FIBRE:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_FIBRE;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ break;
+ case PORT_TP:
+ ecmd->supported |= SUPPORTED_TP;
+ ecmd->advertising |= ADVERTISED_TP;
+ ecmd->port = PORT_TP;
+ ecmd->transceiver = XCVR_INTERNAL;
+ break;
+ default:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_OTHER;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ break;
+ }
+ ecmd->phy_address = ahw->physical_port;
+ return status;
+}
+
+int qlcnic_83xx_set_settings(struct qlcnic_adapter *adapter,
+ struct ethtool_cmd *ecmd)
+{
+ int status = 0;
+ u32 config = adapter->ahw->port_config;
+
+ if (ecmd->autoneg)
+ adapter->ahw->port_config |= BIT_15;
+
+ switch (ethtool_cmd_speed(ecmd)) {
+ case SPEED_10:
+ adapter->ahw->port_config |= BIT_8;
+ break;
+ case SPEED_100:
+ adapter->ahw->port_config |= BIT_9;
+ break;
+ case SPEED_1000:
+ adapter->ahw->port_config |= BIT_10;
+ break;
+ case SPEED_10000:
+ adapter->ahw->port_config |= BIT_11;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ status = qlcnic_83xx_set_port_config(adapter);
+ if (status) {
+ dev_info(&adapter->pdev->dev,
+ "Failed to Set Link Speed and autoneg.\n");
+ adapter->ahw->port_config = config;
+ }
+ return status;
+}
+
+static inline u64 *qlcnic_83xx_copy_stats(struct qlcnic_cmd_args *cmd,
+ u64 *data, int index)
+{
+ u32 low, hi;
+ u64 val;
+
+ low = cmd->rsp.arg[index];
+ hi = cmd->rsp.arg[index + 1];
+ val = (((u64) low) | (((u64) hi) << 32));
+ *data++ = val;
+ return data;
+}
+
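+/* Issue the GET_STATISTICS mailbox command and copy the 64-bit counters out
+ * of the response, skipping the reserved gaps that differ between the MAC,
+ * Rx and Tx register layouts.
+ */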
+static u64 *qlcnic_83xx_fill_stats(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd, u64 *data,
+ int type, int *ret)
+{
+ int err, k, total_regs;
+
+ *ret = 0;
+ err = qlcnic_issue_cmd(adapter, cmd);
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_info(&adapter->pdev->dev,
+ "Error in get statistics mailbox command\n");
+ *ret = -EIO;
+ return data;
+ }
+ total_regs = cmd->rsp.num;
+ switch (type) {
+ case QLC_83XX_STAT_MAC:
+ /* fill in MAC tx counters */
+ for (k = 2; k < 28; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 24 bytes of reserved area */
+ /* fill in MAC rx counters */
+ for (k += 6; k < 60; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 24 bytes of reserved area */
+ /* fill in MAC rx frame stats */
+ for (k += 6; k < 80; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* fill in eSwitch stats */
+ for (; k < total_regs; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ break;
+ case QLC_83XX_STAT_RX:
+ for (k = 2; k < 8; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 8 bytes of reserved data */
+ for (k += 2; k < 24; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 8 bytes containing RE1FBQ error data */
+ for (k += 2; k < total_regs; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ break;
+ case QLC_83XX_STAT_TX:
+ for (k = 2; k < 10; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 8 bytes of reserved data */
+ for (k += 2; k < total_regs; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ break;
+ default:
+ dev_warn(&adapter->pdev->dev, "Unknown get statistics mode\n");
+ *ret = -EIO;
+ }
+ return data;
+}
+
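+/* Gather Tx, MAC and Rx statistics for ethtool by issuing one
+ * GET_STATISTICS mailbox command per group and appending the 64-bit
+ * counters to the data buffer.
+ */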
+void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
+{
+ struct qlcnic_cmd_args cmd;
+ struct net_device *netdev = adapter->netdev;
+ int ret = 0;
+
+ ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS);
+ if (ret)
+ return;
+ /* Get Tx stats */
+ cmd.req.arg[1] = BIT_1 | (adapter->tx_ring->ctx_id << 16);
+ cmd.rsp.num = QLC_83XX_TX_STAT_REGS;
+ data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
+ QLC_83XX_STAT_TX, &ret);
+ if (ret) {
+ netdev_err(netdev, "Error getting Tx stats\n");
+ goto out;
+ }
+ /* Get MAC stats */
+ cmd.req.arg[1] = BIT_2 | (adapter->portnum << 16);
+ cmd.rsp.num = QLC_83XX_MAC_STAT_REGS;
+ memset(cmd.rsp.arg, 0, sizeof(u32) * cmd.rsp.num);
+ data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
+ QLC_83XX_STAT_MAC, &ret);
+ if (ret) {
+ netdev_err(netdev, "Error getting MAC stats\n");
+ goto out;
+ }
+ /* Get Rx stats */
+ cmd.req.arg[1] = adapter->recv_ctx->context_id << 16;
+ cmd.rsp.num = QLC_83XX_RX_STAT_REGS;
+ memset(cmd.rsp.arg, 0, sizeof(u32) * cmd.rsp.num);
+ data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
+ QLC_83XX_STAT_RX, &ret);
+ if (ret)
+ netdev_err(netdev, "Error getting Rx stats\n");
+out:
+ qlcnic_free_mbx_args(&cmd);
+}
+
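+/* Register self-test: re-read the firmware version registers and compare
+ * them with the version cached at probe time.
+ */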
+int qlcnic_83xx_reg_test(struct qlcnic_adapter *adapter)
+{
+ u32 major, minor, sub;
+
+ major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ sub = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
+
+ if (adapter->fw_version != QLCNIC_VERSION_CODE(major, minor, sub)) {
+ dev_info(&adapter->pdev->dev, "%s: Reg test failed\n",
+ __func__);
+ return 1;
+ }
+ return 0;
+}
+
+inline int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *adapter)
+{
+ return (ARRAY_SIZE(qlcnic_83xx_ext_reg_tbl) *
+ sizeof(*adapter->ahw->ext_reg_tbl)) +
+ (ARRAY_SIZE(qlcnic_83xx_reg_tbl) *
+ sizeof(*adapter->ahw->reg_tbl));
+}
+
+int qlcnic_83xx_get_registers(struct qlcnic_adapter *adapter, u32 *regs_buff)
+{
+ int i, j = 0;
+
+ for (i = QLCNIC_DEV_INFO_SIZE + 1;
+ j < ARRAY_SIZE(qlcnic_83xx_reg_tbl); i++, j++)
+ regs_buff[i] = QLC_SHARED_REG_RD32(adapter, j);
+
+ for (j = 0; j < ARRAY_SIZE(qlcnic_83xx_ext_reg_tbl); j++)
+ regs_buff[i++] = QLCRDX(adapter->ahw, j);
+ return i;
+}
+
+int qlcnic_83xx_interrupt_test(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_cmd_args cmd;
+ u8 val, drv_sds_rings = adapter->drv_sds_rings;
+ u8 drv_tx_rings = adapter->drv_tx_rings;
+ u32 data;
+ u16 intrpt_id, id;
+ int ret;
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+ netdev_info(netdev, "Device is resetting\n");
+ return -EBUSY;
+ }
+
+ if (qlcnic_get_diag_lock(adapter)) {
+ netdev_info(netdev, "Device in diagnostics mode\n");
+ return -EBUSY;
+ }
+
+ ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST,
+ drv_sds_rings);
+ if (ret)
+ goto fail_diag_irq;
+
+ ahw->diag_cnt = 0;
+ ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
+ if (ret)
+ goto fail_diag_irq;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intrpt_id = ahw->intr_tbl[0].id;
+ else
+ intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+
+ cmd.req.arg[1] = 1;
+ cmd.req.arg[2] = intrpt_id;
+ cmd.req.arg[3] = BIT_0;
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ data = cmd.rsp.arg[2];
+ id = LSW(data);
+ val = LSB(MSW(data));
+ if (id != intrpt_id)
+ dev_info(&adapter->pdev->dev,
+ "Interrupt generated: 0x%x, requested:0x%x\n",
+ id, intrpt_id);
+ if (val)
+ dev_err(&adapter->pdev->dev,
+ "Interrupt test error: 0x%x\n", val);
+ if (ret)
+ goto done;
+
+ msleep(20);
+ ret = !ahw->diag_cnt;
+
+done:
+ qlcnic_free_mbx_args(&cmd);
+ qlcnic_83xx_diag_free_res(netdev, drv_sds_rings);
+
+fail_diag_irq:
+ adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
+ qlcnic_release_diag_lock(adapter);
+ return ret;
+}
+
+void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *adapter,
+ struct ethtool_pauseparam *pause)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int status = 0;
+ u32 config;
+
+ status = qlcnic_83xx_get_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Get Pause Config failed\n", __func__);
+ return;
+ }
+ config = ahw->port_config;
+ if (config & QLC_83XX_CFG_STD_PAUSE) {
+ switch (MSW(config)) {
+ case QLC_83XX_TX_PAUSE:
+ pause->tx_pause = 1;
+ break;
+ case QLC_83XX_RX_PAUSE:
+ pause->rx_pause = 1;
+ break;
+ case QLC_83XX_TX_RX_PAUSE:
+ default:
+ /* Backward compatibility for existing
+ * flash definitions
+ */
+ pause->tx_pause = 1;
+ pause->rx_pause = 1;
+ }
+ }
+
+ if (QLC_83XX_AUTONEG(config))
+ pause->autoneg = 1;
+}
+
+int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *adapter,
+ struct ethtool_pauseparam *pause)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int status = 0;
+ u32 config;
+
+ status = qlcnic_83xx_get_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Get Pause Config failed.\n", __func__);
+ return status;
+ }
+ config = ahw->port_config;
+
+ if (ahw->port_type == QLCNIC_GBE) {
+ if (pause->autoneg)
+ ahw->port_config |= QLC_83XX_ENABLE_AUTONEG;
+ if (!pause->autoneg)
+ ahw->port_config &= ~QLC_83XX_ENABLE_AUTONEG;
+ } else if ((ahw->port_type == QLCNIC_XGBE) && (pause->autoneg)) {
+ return -EOPNOTSUPP;
+ }
+
+ if (!(config & QLC_83XX_CFG_STD_PAUSE))
+ ahw->port_config |= QLC_83XX_CFG_STD_PAUSE;
+
+ if (pause->rx_pause && pause->tx_pause) {
+ ahw->port_config |= QLC_83XX_CFG_STD_TX_RX_PAUSE;
+ } else if (pause->rx_pause && !pause->tx_pause) {
+ ahw->port_config &= ~QLC_83XX_CFG_STD_TX_PAUSE;
+ ahw->port_config |= QLC_83XX_CFG_STD_RX_PAUSE;
+ } else if (pause->tx_pause && !pause->rx_pause) {
+ ahw->port_config &= ~QLC_83XX_CFG_STD_RX_PAUSE;
+ ahw->port_config |= QLC_83XX_CFG_STD_TX_PAUSE;
+ } else if (!pause->rx_pause && !pause->tx_pause) {
+ ahw->port_config &= ~(QLC_83XX_CFG_STD_TX_RX_PAUSE |
+ QLC_83XX_CFG_STD_PAUSE);
+ }
+ status = qlcnic_83xx_set_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Set Pause Config failed.\n", __func__);
+ ahw->port_config = config;
+ }
+ return status;
+}
+
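+/* Read the flash status byte using the OEM read signature; used by the
+ * EEPROM/flash self-test.
+ */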
+static int qlcnic_83xx_read_flash_status_reg(struct qlcnic_adapter *adapter)
+{
+ int ret, err = 0;
+ u32 temp;
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_OEM_READ_SIG);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_READ_CTRL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret)
+ return -EIO;
+
+ temp = QLCRD32(adapter, QLC_83XX_FLASH_RDDATA, &err);
+ if (err == -EIO)
+ return err;
+
+ return temp & 0xFF;
+}
+
+int qlcnic_83xx_flash_test(struct qlcnic_adapter *adapter)
+{
+ int status;
+
+ status = qlcnic_83xx_read_flash_status_reg(adapter);
+ if (status == -EIO) {
+ dev_info(&adapter->pdev->dev, "%s: EEPROM test failed.\n",
+ __func__);
+ return 1;
+ }
+ return 0;
+}
+
+static int qlcnic_83xx_shutdown(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+ int retval;
+
+ netif_device_detach(netdev);
+ qlcnic_cancel_idc_work(adapter);
+
+ if (netif_running(netdev))
+ qlcnic_down(adapter, netdev);
+
+ qlcnic_83xx_disable_mbx_intr(adapter);
+ cancel_delayed_work_sync(&adapter->idc_aen_work);
+
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
+ return 0;
+}
+
+static int qlcnic_83xx_resume(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+ int err = 0;
+
+ err = qlcnic_83xx_idc_init(adapter);
+ if (err)
+ return err;
+
+ if (ahw->nic_mode == QLCNIC_VNIC_MODE) {
+ if (ahw->op_mode == QLCNIC_MGMT_FUNC) {
+ qlcnic_83xx_set_vnic_opmode(adapter);
+ } else {
+ err = qlcnic_83xx_check_vnic_state(adapter);
+ if (err)
+ return err;
+ }
+ }
+
+ err = qlcnic_83xx_idc_reattach_driver(adapter);
+ if (err)
+ return err;
+
+ qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
+ idc->delay);
+ return err;
+}
+
+void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx)
+{
+ reinit_completion(&mbx->completion);
+ set_bit(QLC_83XX_MBX_READY, &mbx->status);
+}
+
+void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx)
+{
+ if (!mbx)
+ return;
+
+ destroy_workqueue(mbx->work_q);
+ kfree(mbx);
+}
+
+static inline void
+qlcnic_83xx_notify_cmd_completion(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
+
+ if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+ qlcnic_free_mbx_args(cmd);
+ kfree(cmd);
+ return;
+ }
+ complete(&cmd->completion);
+}
+
+static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+ struct list_head *head = &mbx->cmd_q;
+ struct qlcnic_cmd_args *cmd = NULL;
+
+ spin_lock(&mbx->queue_lock);
+
+ while (!list_empty(head)) {
+ cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
+ dev_info(&adapter->pdev->dev, "%s: Mailbox command 0x%x\n",
+ __func__, cmd->cmd_op);
+ list_del(&cmd->list);
+ mbx->num_cmds--;
+ qlcnic_83xx_notify_cmd_completion(adapter, cmd);
+ }
+
+ spin_unlock(&mbx->queue_lock);
+}
+
+static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
+ u32 host_mbx_ctrl;
+
+ if (!test_bit(QLC_83XX_MBX_READY, &mbx->status))
+ return -EBUSY;
+
+ host_mbx_ctrl = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
+ if (host_mbx_ctrl) {
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ ahw->idc.collect_dump = 1;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static inline void qlcnic_83xx_signal_mbx_cmd(struct qlcnic_adapter *adapter,
+ u8 issue_cmd)
+{
+ if (issue_cmd)
+ QLCWRX(adapter->ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
+ else
+ QLCWRX(adapter->ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
+}
+
+static void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
+ spin_lock(&mbx->queue_lock);
+
+ list_del(&cmd->list);
+ mbx->num_cmds--;
+
+ spin_unlock(&mbx->queue_lock);
+
+ qlcnic_83xx_notify_cmd_completion(adapter, cmd);
+}
+
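+/* Copy a mailbox request into the host mailbox registers. Back-channel
+ * (QLC_83XX_MBX_POST_BC_OP) requests carry a BC header and payload instead
+ * of the plain argument array; on the PF the destination function number is
+ * encoded in the second word.
+ */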
+static void qlcnic_83xx_encode_mbx_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ u32 mbx_cmd, fw_hal_version, hdr_size, total_size, tmp;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int i, j;
+
+ if (cmd->op_type != QLC_83XX_MBX_POST_BC_OP) {
+ mbx_cmd = cmd->req.arg[0];
+ writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
+ for (i = 1; i < cmd->req.num; i++)
+ writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i));
+ } else {
+ fw_hal_version = ahw->fw_hal_version;
+ hdr_size = sizeof(struct qlcnic_bc_hdr) / sizeof(u32);
+ total_size = cmd->pay_size + hdr_size;
+ tmp = QLCNIC_CMD_BC_EVENT_SETUP | total_size << 16;
+ mbx_cmd = tmp | fw_hal_version << 29;
+ writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
+
+ /* Back-channel specific operation bits */
+ mbx_cmd = 0x1 | 1 << 4;
+
+ if (qlcnic_sriov_pf_check(adapter))
+ mbx_cmd |= cmd->func_num << 5;
+
+ writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1));
+
+ for (i = 2, j = 0; j < hdr_size; i++, j++)
+ writel(*(cmd->hdr++), QLCNIC_MBX_HOST(ahw, i));
+ for (j = 0; j < cmd->pay_size; j++, i++)
+ writel(*(cmd->pay++), QLCNIC_MBX_HOST(ahw, i));
+ }
+}
+
+void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
+ if (!mbx)
+ return;
+
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ complete(&mbx->completion);
+ cancel_work_sync(&mbx->work);
+ flush_workqueue(mbx->work_q);
+ qlcnic_83xx_flush_mbx_queue(adapter);
+}
+
+static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd,
+ unsigned long *timeout)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
+ if (test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
+ atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
+ init_completion(&cmd->completion);
+ cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN;
+
+ spin_lock(&mbx->queue_lock);
+
+ list_add_tail(&cmd->list, &mbx->cmd_q);
+ mbx->num_cmds++;
+ cmd->total_cmds = mbx->num_cmds;
+ *timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT;
+ queue_work(mbx->work_q, &mbx->work);
+
+ spin_unlock(&mbx->queue_lock);
+
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
+static int qlcnic_83xx_check_mac_rcode(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ u8 mac_cmd_rcode;
+ u32 fw_data;
+
+ if (cmd->cmd_op == QLCNIC_CMD_CONFIG_MAC_VLAN) {
+ fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2));
+ mac_cmd_rcode = (u8)fw_data;
+ if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE ||
+ mac_cmd_rcode == QLC_83XX_MAC_PRESENT ||
+ mac_cmd_rcode == QLC_83XX_MAC_ABSENT) {
+ cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS;
+ return QLCNIC_RCODE_SUCCESS;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static void qlcnic_83xx_decode_mbx_rsp(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct device *dev = &adapter->pdev->dev;
+ u8 mbx_err_code;
+ u32 fw_data;
+
+ fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
+ mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
+ qlcnic_83xx_get_mbx_data(adapter, cmd);
+
+ switch (mbx_err_code) {
+ case QLCNIC_MBX_RSP_OK:
+ case QLCNIC_MBX_PORT_RSP_OK:
+ cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS;
+ break;
+ default:
+ if (!qlcnic_83xx_check_mac_rcode(adapter, cmd))
+ break;
+
+ dev_err(dev, "%s: Mailbox command failed, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x, error=0x%x\n",
+ __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
+ ahw->op_mode, mbx_err_code);
+ cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_FAILED;
+ qlcnic_dump_mbx(adapter, cmd);
+ }
+}
+
+static inline void qlcnic_dump_mailbox_registers(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 offset;
+
+ offset = QLCRDX(ahw, QLCNIC_DEF_INT_MASK);
+ dev_info(&adapter->pdev->dev, "Mbx interrupt mask=0x%x, Mbx interrupt enable=0x%x, Host mbx control=0x%x, Fw mbx control=0x%x",
+ readl(ahw->pci_base0 + offset),
+ QLCRDX(ahw, QLCNIC_MBX_INTR_ENBL),
+ QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL),
+ QLCRDX(ahw, QLCNIC_FW_MBX_CTRL));
+}
+
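+/* Mailbox worker: drain the command queue one entry at a time, signal the
+ * firmware, wait for the completion interrupt and decode the response. On
+ * timeout the mailbox is marked not ready, the registers are dumped and a
+ * firmware dump/reset is requested.
+ */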
+static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
+{
+ struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox,
+ work);
+ struct qlcnic_adapter *adapter = mbx->adapter;
+ struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
+ struct device *dev = &adapter->pdev->dev;
+ atomic_t *rsp_status = &mbx->rsp_status;
+ struct list_head *head = &mbx->cmd_q;
+ struct qlcnic_hardware_context *ahw;
+ struct qlcnic_cmd_args *cmd = NULL;
+
+ ahw = adapter->ahw;
+
+ while (true) {
+ if (qlcnic_83xx_check_mbx_status(adapter)) {
+ qlcnic_83xx_flush_mbx_queue(adapter);
+ return;
+ }
+
+ atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
+
+ spin_lock(&mbx->queue_lock);
+
+ if (list_empty(head)) {
+ spin_unlock(&mbx->queue_lock);
+ return;
+ }
+ cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
+
+ spin_unlock(&mbx->queue_lock);
+
+ mbx_ops->encode_cmd(adapter, cmd);
+ mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST);
+
+ if (wait_for_completion_timeout(&mbx->completion,
+ QLC_83XX_MBX_TIMEOUT)) {
+ mbx_ops->decode_resp(adapter, cmd);
+ mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_COMPLETION);
+ } else {
+ dev_err(dev, "%s: Mailbox command timeout, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x\n",
+ __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
+ ahw->op_mode);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ qlcnic_dump_mailbox_registers(adapter);
+ qlcnic_83xx_get_mbx_data(adapter, cmd);
+ qlcnic_dump_mbx(adapter, cmd);
+ qlcnic_83xx_idc_request_reset(adapter,
+ QLCNIC_FORCE_FW_DUMP_KEY);
+ cmd->rsp_opcode = QLCNIC_RCODE_TIMEOUT;
+ }
+ mbx_ops->dequeue_cmd(adapter, cmd);
+ }
+}
+
+static struct qlcnic_mbx_ops qlcnic_83xx_mbx_ops = {
+ .enqueue_cmd = qlcnic_83xx_enqueue_mbx_cmd,
+ .dequeue_cmd = qlcnic_83xx_dequeue_mbx_cmd,
+ .decode_resp = qlcnic_83xx_decode_mbx_rsp,
+ .encode_cmd = qlcnic_83xx_encode_mbx_cmd,
+ .nofity_fw = qlcnic_83xx_signal_mbx_cmd,
+};
+
+int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx;
+
+ ahw->mailbox = kzalloc(sizeof(*mbx), GFP_KERNEL);
+ if (!ahw->mailbox)
+ return -ENOMEM;
+
+ mbx = ahw->mailbox;
+ mbx->ops = &qlcnic_83xx_mbx_ops;
+ mbx->adapter = adapter;
+
+ spin_lock_init(&mbx->queue_lock);
+ spin_lock_init(&mbx->aen_lock);
+ INIT_LIST_HEAD(&mbx->cmd_q);
+ init_completion(&mbx->completion);
+
+ mbx->work_q = create_singlethread_workqueue("qlcnic_mailbox");
+ if (mbx->work_q == NULL) {
+ kfree(mbx);
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&mbx->work, qlcnic_83xx_mailbox_worker);
+ set_bit(QLC_83XX_MBX_READY, &mbx->status);
+ return 0;
+}
+
+static pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (state == pci_channel_io_normal)
+ return PCI_ERS_RESULT_RECOVERED;
+
+ set_bit(__QLCNIC_AER, &adapter->state);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ qlcnic_83xx_aer_stop_poll_work(adapter);
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ int err = 0;
+
+ pdev->error_state = pci_channel_io_normal;
+ err = pci_enable_device(pdev);
+ if (err)
+ goto disconnect;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
+ err = qlcnic_83xx_aer_reset(adapter);
+ if (err == 0)
+ return PCI_ERS_RESULT_RECOVERED;
+disconnect:
+ clear_bit(__QLCNIC_AER, &adapter->state);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+static void qlcnic_83xx_io_resume(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ if (test_and_clear_bit(__QLCNIC_AER, &adapter->state))
+ qlcnic_83xx_aer_start_poll_work(adapter);
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
new file mode 100644
index 00000000000..2bf101a47d0
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -0,0 +1,661 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#ifndef __QLCNIC_83XX_HW_H
+#define __QLCNIC_83XX_HW_H
+
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include "qlcnic_hw.h"
+
+#define QLCNIC_83XX_BAR0_LENGTH 0x4000
+
+/* Directly mapped registers */
+#define QLC_83XX_CRB_WIN_BASE 0x3800
+#define QLC_83XX_CRB_WIN_FUNC(f) (QLC_83XX_CRB_WIN_BASE+((f)*4))
+#define QLC_83XX_SEM_LOCK_BASE 0x3840
+#define QLC_83XX_SEM_UNLOCK_BASE 0x3844
+#define QLC_83XX_SEM_LOCK_FUNC(f) (QLC_83XX_SEM_LOCK_BASE+((f)*8))
+#define QLC_83XX_SEM_UNLOCK_FUNC(f) (QLC_83XX_SEM_UNLOCK_BASE+((f)*8))
+#define QLC_83XX_LINK_STATE(f) (0x3698+((f) > 7 ? 4 : 0))
+#define QLC_83XX_LINK_SPEED(f) (0x36E0+(((f) >> 2) * 4))
+#define QLC_83XX_LINK_SPEED_FACTOR 10
+#define QLC_83xx_FUNC_VAL(v, f) ((v) & (1 << ((f) * 4)))
+#define QLC_83XX_INTX_PTR 0x38C0
+#define QLC_83XX_INTX_TRGR 0x38C4
+#define QLC_83XX_INTX_MASK 0x38C8
+
+#define QLC_83XX_DRV_LOCK_WAIT_COUNTER 100
+#define QLC_83XX_DRV_LOCK_WAIT_DELAY 20
+#define QLC_83XX_NEED_DRV_LOCK_RECOVERY 1
+#define QLC_83XX_DRV_LOCK_RECOVERY_IN_PROGRESS 2
+#define QLC_83XX_MAX_DRV_LOCK_RECOVERY_ATTEMPT 3
+#define QLC_83XX_DRV_LOCK_RECOVERY_DELAY 200
+#define QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK 0x3
+#define QLC_83XX_LB_WAIT_COUNT 250
+#define QLC_83XX_LB_MSLEEP_COUNT 20
+#define QLC_83XX_NO_NIC_RESOURCE 0x5
+#define QLC_83XX_MAC_PRESENT 0xC
+#define QLC_83XX_MAC_ABSENT 0xD
+
+#define QLC_83XX_FLASH_SECTOR_SIZE (64 * 1024)
+
+/* PEG status definitions */
+#define QLC_83XX_CMDPEG_COMPLETE 0xff01
+#define QLC_83XX_VALID_INTX_BIT30(val) ((val) & BIT_30)
+#define QLC_83XX_VALID_INTX_BIT31(val) ((val) & BIT_31)
+#define QLC_83XX_INTX_FUNC(val) ((val) & 0xFF)
+#define QLC_83XX_LEGACY_INTX_MAX_RETRY 100
+#define QLC_83XX_LEGACY_INTX_DELAY 4
+#define QLC_83XX_REG_DESC 1
+#define QLC_83XX_LRO_DESC 2
+#define QLC_83XX_CTRL_DESC 3
+#define QLC_83XX_FW_CAPABILITY_TSO BIT_6
+#define QLC_83XX_FW_CAP_LRO_MSS BIT_17
+#define QLC_83XX_HOST_RDS_MODE_UNIQUE 0
+#define QLC_83XX_HOST_SDS_MBX_IDX 8
+
+#define QLCNIC_HOST_RDS_MBX_IDX 88
+
+/* Pause control registers */
+#define QLC_83XX_SRE_SHIM_REG 0x0D200284
+#define QLC_83XX_PORT0_THRESHOLD 0x0B2003A4
+#define QLC_83XX_PORT1_THRESHOLD 0x0B2013A4
+#define QLC_83XX_PORT0_TC_MC_REG 0x0B200388
+#define QLC_83XX_PORT1_TC_MC_REG 0x0B201388
+#define QLC_83XX_PORT0_TC_STATS 0x0B20039C
+#define QLC_83XX_PORT1_TC_STATS 0x0B20139C
+#define QLC_83XX_PORT2_IFB_THRESHOLD 0x0B200704
+#define QLC_83XX_PORT3_IFB_THRESHOLD 0x0B201704
+
+/* Peg PC status registers */
+#define QLC_83XX_CRB_PEG_NET_0 0x3400003c
+#define QLC_83XX_CRB_PEG_NET_1 0x3410003c
+#define QLC_83XX_CRB_PEG_NET_2 0x3420003c
+#define QLC_83XX_CRB_PEG_NET_3 0x3430003c
+#define QLC_83XX_CRB_PEG_NET_4 0x34b0003c
+
+/* Firmware image definitions */
+#define QLC_83XX_BOOTLOADER_FLASH_ADDR 0x10000
+#define QLC_83XX_FW_FILE_NAME "83xx_fw.bin"
+#define QLC_84XX_FW_FILE_NAME "84xx_fw.bin"
+#define QLC_83XX_BOOT_FROM_FLASH 0
+#define QLC_83XX_BOOT_FROM_FILE 0x12345678
+
+#define QLC_FW_FILE_NAME_LEN 20
+#define QLC_83XX_MAX_RESET_SEQ_ENTRIES 16
+
+#define QLC_83XX_MBX_POST_BC_OP 0x1
+#define QLC_83XX_MBX_COMPLETION 0x0
+#define QLC_83XX_MBX_REQUEST 0x1
+
+#define QLC_83XX_MBX_TIMEOUT (5 * HZ)
+#define QLC_83XX_MBX_CMD_LOOP 5000000
+
+/* status descriptor mailbox data
+ * @phy_addr_{low|high}: physical address of buffer
+ * @sds_ring_size: buffer size
+ * @intrpt_id: interrupt id
+ * @intrpt_val: source of interrupt
+ */
+struct qlcnic_sds_mbx {
+ u32 phy_addr_low;
+ u32 phy_addr_high;
+ u32 rsvd1[4];
+#if defined(__LITTLE_ENDIAN)
+ u16 sds_ring_size;
+ u16 rsvd2;
+ u16 rsvd3[2];
+ u16 intrpt_id;
+ u8 intrpt_val;
+ u8 rsvd4;
+#elif defined(__BIG_ENDIAN)
+ u16 rsvd2;
+ u16 sds_ring_size;
+ u16 rsvd3[2];
+ u8 rsvd4;
+ u8 intrpt_val;
+ u16 intrpt_id;
+#endif
+ u32 rsvd5;
+} __packed;
+
+/* receive descriptor buffer data
+ * @phy_addr_reg_{low|high}: physical address of regular buffer
+ * @phy_addr_jmb_{low|high}: physical address of jumbo buffer
+ * @reg_ring_sz: size of regular buffer
+ * @reg_ring_len: no. of entries in regular buffer
+ * @jmb_ring_len: no. of entries in jumbo buffer
+ * @jmb_ring_sz: size of jumbo buffer
+ */
+struct qlcnic_rds_mbx {
+ u32 phy_addr_reg_low;
+ u32 phy_addr_reg_high;
+ u32 phy_addr_jmb_low;
+ u32 phy_addr_jmb_high;
+#if defined(__LITTLE_ENDIAN)
+ u16 reg_ring_sz;
+ u16 reg_ring_len;
+ u16 jmb_ring_sz;
+ u16 jmb_ring_len;
+#elif defined(__BIG_ENDIAN)
+ u16 reg_ring_len;
+ u16 reg_ring_sz;
+ u16 jmb_ring_len;
+ u16 jmb_ring_sz;
+#endif
+} __packed;
+
+/* host producers for regular and jumbo rings */
+struct __host_producer_mbx {
+ u32 reg_buf;
+ u32 jmb_buf;
+} __packed;
+
+/* Receive context mailbox data outbox registers
+ * @state: state of the context
+ * @vport_id: virtual port id
+ * @ctx_id: receive context id
+ * @num_pci_func: number of pci functions of the port
+ * @phy_port: physical port id
+ */
+struct qlcnic_rcv_mbx_out {
+#if defined(__LITTLE_ENDIAN)
+ u8 rcv_num;
+ u8 sts_num;
+ u16 ctx_id;
+ u8 state;
+ u8 num_pci_func;
+ u8 phy_port;
+ u8 vport_id;
+#elif defined(__BIG_ENDIAN)
+ u16 ctx_id;
+ u8 sts_num;
+ u8 rcv_num;
+ u8 vport_id;
+ u8 phy_port;
+ u8 num_pci_func;
+ u8 state;
+#endif
+ u32 host_csmr[QLCNIC_MAX_SDS_RINGS];
+ struct __host_producer_mbx host_prod[QLCNIC_MAX_SDS_RINGS];
+} __packed;
+
+struct qlcnic_add_rings_mbx_out {
+#if defined(__LITTLE_ENDIAN)
+ u8 rcv_num;
+ u8 sts_num;
+ u16 ctx_id;
+#elif defined(__BIG_ENDIAN)
+ u16 ctx_id;
+ u8 sts_num;
+ u8 rcv_num;
+#endif
+ u32 host_csmr[QLCNIC_MAX_SDS_RINGS];
+ struct __host_producer_mbx host_prod[QLCNIC_MAX_SDS_RINGS];
+} __packed;
+
+/* Transmit context mailbox inbox registers
+ * @phys_addr_{low|high}: DMA address of the transmit buffer
+ * @cnsmr_index_{low|high}: host consumer index
+ * @size: length of the transmit buffer ring
+ * @intr_id: interrupt id
+ * @src: source of interrupt
+ */
+struct qlcnic_tx_mbx {
+ u32 phys_addr_low;
+ u32 phys_addr_high;
+ u32 cnsmr_index_low;
+ u32 cnsmr_index_high;
+#if defined(__LITTLE_ENDIAN)
+ u16 size;
+ u16 intr_id;
+ u8 src;
+ u8 rsvd[3];
+#elif defined(__BIG_ENDIAN)
+ u16 intr_id;
+ u16 size;
+ u8 rsvd[3];
+ u8 src;
+#endif
+} __packed;
+
+/* Transmit context mailbox outbox registers
+ * @host_prod: host producer index
+ * @ctx_id: transmit context id
+ * @state: state of the transmit context
+ */
+
+struct qlcnic_tx_mbx_out {
+ u32 host_prod;
+#if defined(__LITTLE_ENDIAN)
+ u16 ctx_id;
+ u8 state;
+ u8 rsvd;
+#elif defined(__BIG_ENDIAN)
+ u8 rsvd;
+ u8 state;
+ u16 ctx_id;
+#endif
+} __packed;
+
+struct qlcnic_intrpt_config {
+ u8 type;
+ u8 enabled;
+ u16 id;
+ u32 src;
+};
+
+struct qlcnic_macvlan_mbx {
+#if defined(__LITTLE_ENDIAN)
+ u8 mac_addr0;
+ u8 mac_addr1;
+ u8 mac_addr2;
+ u8 mac_addr3;
+ u8 mac_addr4;
+ u8 mac_addr5;
+ u16 vlan;
+#elif defined(__BIG_ENDIAN)
+ u8 mac_addr3;
+ u8 mac_addr2;
+ u8 mac_addr1;
+ u8 mac_addr0;
+ u16 vlan;
+ u8 mac_addr5;
+ u8 mac_addr4;
+#endif
+};
+
+struct qlc_83xx_fw_info {
+ const struct firmware *fw;
+ char fw_file_name[QLC_FW_FILE_NAME_LEN];
+};
+
+struct qlc_83xx_reset {
+ struct qlc_83xx_reset_hdr *hdr;
+ int seq_index;
+ int seq_error;
+ int array_index;
+ u32 array[QLC_83XX_MAX_RESET_SEQ_ENTRIES];
+ u8 *buff;
+ u8 *stop_offset;
+ u8 *start_offset;
+ u8 *init_offset;
+ u8 seq_end;
+ u8 template_end;
+};
+
+#define QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY 0x1
+#define QLC_83XX_IDC_GRACEFULL_RESET 0x2
+#define QLC_83XX_IDC_DISABLE_FW_DUMP 0x4
+#define QLC_83XX_IDC_TIMESTAMP 0
+#define QLC_83XX_IDC_DURATION 1
+#define QLC_83XX_IDC_INIT_TIMEOUT_SECS 30
+#define QLC_83XX_IDC_RESET_ACK_TIMEOUT_SECS 10
+#define QLC_83XX_IDC_RESET_TIMEOUT_SECS 10
+#define QLC_83XX_IDC_QUIESCE_ACK_TIMEOUT_SECS 20
+#define QLC_83XX_IDC_FW_POLL_DELAY (1 * HZ)
+#define QLC_83XX_IDC_FW_FAIL_THRESH 2
+#define QLC_83XX_IDC_MAX_FUNC_PER_PARTITION_INFO 8
+#define QLC_83XX_IDC_MAX_CNA_FUNCTIONS 16
+#define QLC_83XX_IDC_MAJOR_VERSION 1
+#define QLC_83XX_IDC_MINOR_VERSION 0
+#define QLC_83XX_IDC_FLASH_PARAM_ADDR 0x3e8020
+
+struct qlcnic_adapter;
+struct qlcnic_fw_dump;
+
+struct qlc_83xx_idc {
+ int (*state_entry) (struct qlcnic_adapter *);
+ u64 sec_counter;
+ u64 delay;
+ unsigned long status;
+ int err_code;
+ int collect_dump;
+ u8 curr_state;
+ u8 prev_state;
+ u8 vnic_state;
+ u8 vnic_wait_limit;
+ u8 quiesce_req;
+ u8 delay_reset;
+ char **name;
+};
+
+enum qlcnic_vlan_operations {
+ QLC_VLAN_ADD = 0,
+ QLC_VLAN_DELETE
+};
+
+/* Device States */
+enum qlcnic_83xx_states {
+ QLC_83XX_IDC_DEV_UNKNOWN,
+ QLC_83XX_IDC_DEV_COLD,
+ QLC_83XX_IDC_DEV_INIT,
+ QLC_83XX_IDC_DEV_READY,
+ QLC_83XX_IDC_DEV_NEED_RESET,
+ QLC_83XX_IDC_DEV_NEED_QUISCENT,
+ QLC_83XX_IDC_DEV_FAILED,
+ QLC_83XX_IDC_DEV_QUISCENT
+};
+
+#define QLCNIC_MBX_RSP(reg) LSW(reg)
+#define QLCNIC_MBX_NUM_REGS(reg) (MSW(reg) & 0x1FF)
+#define QLCNIC_MBX_STATUS(reg) (((reg) >> 25) & 0x7F)
+#define QLCNIC_MBX_HOST(ahw, i) ((ahw)->pci_base0 + ((i) * 4))
+#define QLCNIC_MBX_FW(ahw, i) ((ahw)->pci_base0 + 0x800 + ((i) * 4))
+
+/* Mailbox process AEN count */
+#define QLC_83XX_IDC_COMP_AEN 3
+#define QLC_83XX_MBX_AEN_CNT 5
+#define QLC_83XX_MODULE_LOADED 1
+#define QLC_83XX_MBX_READY 2
+#define QLC_83XX_MBX_AEN_ACK 3
+#define QLC_83XX_SFP_PRESENT(data) ((data) & 3)
+#define QLC_83XX_SFP_ERR(data) (((data) >> 2) & 3)
+#define QLC_83XX_SFP_MODULE_TYPE(data) (((data) >> 4) & 0x1F)
+#define QLC_83XX_SFP_CU_LENGTH(data) (LSB((data) >> 16))
+#define QLC_83XX_SFP_TX_FAULT(data) ((data) & BIT_10)
+#define QLC_83XX_SFP_10G_CAPABLE(data) ((data) & BIT_11)
+#define QLC_83XX_LINK_STATS(data) ((data) & BIT_0)
+#define QLC_83XX_CURRENT_LINK_SPEED(data) (((data) >> 3) & 7)
+#define QLC_83XX_LINK_PAUSE(data) (((data) >> 6) & 3)
+#define QLC_83XX_LINK_LB(data) (((data) >> 8) & 7)
+#define QLC_83XX_LINK_FEC(data) ((data) & BIT_12)
+#define QLC_83XX_LINK_EEE(data) ((data) & BIT_13)
+#define QLC_83XX_DCBX(data) (((data) >> 28) & 7)
+#define QLC_83XX_AUTONEG(data) ((data) & BIT_15)
+#define QLC_83XX_TX_PAUSE 0x10
+#define QLC_83XX_RX_PAUSE 0x20
+#define QLC_83XX_TX_RX_PAUSE 0x30
+#define QLC_83XX_CFG_STD_PAUSE (1 << 5)
+#define QLC_83XX_CFG_STD_TX_PAUSE (1 << 20)
+#define QLC_83XX_CFG_STD_RX_PAUSE (2 << 20)
+#define QLC_83XX_CFG_STD_TX_RX_PAUSE (3 << 20)
+#define QLC_83XX_ENABLE_AUTONEG (1 << 15)
+#define QLC_83XX_CFG_LOOPBACK_HSS (2 << 1)
+#define QLC_83XX_CFG_LOOPBACK_PHY (3 << 1)
+#define QLC_83XX_CFG_LOOPBACK_EXT (4 << 1)
+
+/* LED configuration settings */
+#define QLC_83XX_ENABLE_BEACON 0xe
+#define QLC_83XX_BEACON_ON 1
+#define QLC_83XX_BEACON_OFF 0
+#define QLC_83XX_LED_RATE 0xff
+#define QLC_83XX_LED_ACT (1 << 10)
+#define QLC_83XX_LED_MOD (0 << 13)
+#define QLC_83XX_LED_CONFIG (QLC_83XX_LED_RATE | QLC_83XX_LED_ACT | \
+ QLC_83XX_LED_MOD)
+
+#define QLC_83XX_10M_LINK 1
+#define QLC_83XX_100M_LINK 2
+#define QLC_83XX_1G_LINK 3
+#define QLC_83XX_10G_LINK 4
+#define QLC_83XX_STAT_TX 3
+#define QLC_83XX_STAT_RX 2
+#define QLC_83XX_STAT_MAC 1
+#define QLC_83XX_TX_STAT_REGS 14
+#define QLC_83XX_RX_STAT_REGS 40
+#define QLC_83XX_MAC_STAT_REGS 94
+
+#define QLC_83XX_GET_FUNC_PRIVILEGE(VAL, FN) (0x3 & ((VAL) >> ((FN) * 2)))
+#define QLC_83XX_SET_FUNC_OPMODE(VAL, FN) ((VAL) << ((FN) * 2))
+#define QLC_83XX_DEFAULT_OPMODE 0x55555555
+#define QLC_83XX_PRIVLEGED_FUNC 0x1
+#define QLC_83XX_VIRTUAL_FUNC 0x2
+
+#define QLC_83XX_LB_MAX_FILTERS 2048
+#define QLC_83XX_LB_BUCKET_SIZE 256
+#define QLC_83XX_MINIMUM_VECTOR 3
+#define QLC_83XX_MAX_MC_COUNT 38
+#define QLC_83XX_MAX_UC_COUNT 4096
+
+#define QLC_83XX_PVID_STRIP_CAPABILITY BIT_22
+#define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val) (val & 0x80000000)
+#define QLC_83XX_GET_LRO_CAPABILITY(val) (val & 0x20)
+#define QLC_83XX_GET_LSO_CAPABILITY(val) (val & 0x40)
+#define QLC_83XX_GET_HW_LRO_CAPABILITY(val) (val & 0x400)
+#define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val) (val & 0x4000)
+#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000)
+#define QLC_83XX_ESWITCH_CAPABILITY BIT_23
+#define QLC_83XX_SRIOV_MODE 0x1
+#define QLCNIC_BRDTYPE_83XX_10G 0x0083
+
+#define QLC_83XX_FLASH_SPI_STATUS 0x2808E010
+#define QLC_83XX_FLASH_SPI_CONTROL 0x2808E014
+#define QLC_83XX_FLASH_STATUS 0x42100004
+#define QLC_83XX_FLASH_CONTROL 0x42110004
+#define QLC_83XX_FLASH_ADDR 0x42110008
+#define QLC_83XX_FLASH_WRDATA 0x4211000C
+#define QLC_83XX_FLASH_RDDATA 0x42110018
+#define QLC_83XX_FLASH_DIRECT_WINDOW 0x42110030
+#define QLC_83XX_FLASH_DIRECT_DATA(DATA) (0x42150000 | (0x0000FFFF & (DATA)))
+#define QLC_83XX_FLASH_SECTOR_ERASE_CMD 0xdeadbeef
+#define QLC_83XX_FLASH_WRITE_CMD 0xdacdacda
+#define QLC_83XX_FLASH_BULK_WRITE_CMD 0xcadcadca
+#define QLC_83XX_FLASH_READ_RETRY_COUNT 5000
+#define QLC_83XX_FLASH_STATUS_READY 0x6
+#define QLC_83XX_FLASH_WRITE_MIN 2
+#define QLC_83XX_FLASH_WRITE_MAX 64
+#define QLC_83XX_FLASH_STATUS_REG_POLL_DELAY 1
+#define QLC_83XX_ERASE_MODE 1
+#define QLC_83XX_WRITE_MODE 2
+#define QLC_83XX_BULK_WRITE_MODE 3
+#define QLC_83XX_FLASH_FDT_WRITE_DEF_SIG 0xFD0100
+#define QLC_83XX_FLASH_FDT_ERASE_DEF_SIG 0xFD0300
+#define QLC_83XX_FLASH_FDT_READ_MFG_ID_VAL 0xFD009F
+#define QLC_83XX_FLASH_OEM_ERASE_SIG 0xFD03D8
+#define QLC_83XX_FLASH_OEM_WRITE_SIG 0xFD0101
+#define QLC_83XX_FLASH_OEM_READ_SIG 0xFD0005
+#define QLC_83XX_FLASH_ADDR_TEMP_VAL 0x00800000
+#define QLC_83XX_FLASH_ADDR_SECOND_TEMP_VAL 0x00800001
+#define QLC_83XX_FLASH_WRDATA_DEF 0x0
+#define QLC_83XX_FLASH_READ_CTRL 0x3F
+#define QLC_83XX_FLASH_SPI_CTRL 0x4
+#define QLC_83XX_FLASH_FIRST_ERASE_MS_VAL 0x2
+#define QLC_83XX_FLASH_SECOND_ERASE_MS_VAL 0x5
+#define QLC_83XX_FLASH_LAST_ERASE_MS_VAL 0x3D
+#define QLC_83XX_FLASH_FIRST_MS_PATTERN 0x43
+#define QLC_83XX_FLASH_SECOND_MS_PATTERN 0x7F
+#define QLC_83XX_FLASH_LAST_MS_PATTERN 0x7D
+#define QLC_83xx_FLASH_MAX_WAIT_USEC 100
+#define QLC_83XX_FLASH_LOCK_TIMEOUT 10000
+
+enum qlc_83xx_mbx_cmd_type {
+ QLC_83XX_MBX_CMD_WAIT = 0,
+ QLC_83XX_MBX_CMD_NO_WAIT,
+ QLC_83XX_MBX_CMD_BUSY_WAIT,
+};
+
+enum qlc_83xx_mbx_response_states {
+ QLC_83XX_MBX_RESPONSE_WAIT = 0,
+ QLC_83XX_MBX_RESPONSE_ARRIVED,
+};
+
+#define QLC_83XX_MBX_RESPONSE_FAILED 0x2
+#define QLC_83XX_MBX_RESPONSE_UNKNOWN 0x3
+
+/* Additional registers in 83xx */
+enum qlc_83xx_ext_regs {
+ QLCNIC_GLOBAL_RESET = 0,
+ QLCNIC_WILDCARD,
+ QLCNIC_INFORMANT,
+ QLCNIC_HOST_MBX_CTRL,
+ QLCNIC_FW_MBX_CTRL,
+ QLCNIC_BOOTLOADER_ADDR,
+ QLCNIC_BOOTLOADER_SIZE,
+ QLCNIC_FW_IMAGE_ADDR,
+ QLCNIC_MBX_INTR_ENBL,
+ QLCNIC_DEF_INT_MASK,
+ QLCNIC_DEF_INT_ID,
+ QLC_83XX_IDC_MAJ_VERSION,
+ QLC_83XX_IDC_DEV_STATE,
+ QLC_83XX_IDC_DRV_PRESENCE,
+ QLC_83XX_IDC_DRV_ACK,
+ QLC_83XX_IDC_CTRL,
+ QLC_83XX_IDC_DRV_AUDIT,
+ QLC_83XX_IDC_MIN_VERSION,
+ QLC_83XX_RECOVER_DRV_LOCK,
+ QLC_83XX_IDC_PF_0,
+ QLC_83XX_IDC_PF_1,
+ QLC_83XX_IDC_PF_2,
+ QLC_83XX_IDC_PF_3,
+ QLC_83XX_IDC_PF_4,
+ QLC_83XX_IDC_PF_5,
+ QLC_83XX_IDC_PF_6,
+ QLC_83XX_IDC_PF_7,
+ QLC_83XX_IDC_PF_8,
+ QLC_83XX_IDC_PF_9,
+ QLC_83XX_IDC_PF_10,
+ QLC_83XX_IDC_PF_11,
+ QLC_83XX_IDC_PF_12,
+ QLC_83XX_IDC_PF_13,
+ QLC_83XX_IDC_PF_14,
+ QLC_83XX_IDC_PF_15,
+ QLC_83XX_IDC_DEV_PARTITION_INFO_1,
+ QLC_83XX_IDC_DEV_PARTITION_INFO_2,
+ QLC_83XX_DRV_OP_MODE,
+ QLC_83XX_VNIC_STATE,
+ QLC_83XX_DRV_LOCK,
+ QLC_83XX_DRV_UNLOCK,
+ QLC_83XX_DRV_LOCK_ID,
+ QLC_83XX_ASIC_TEMP,
+};
+
+/* Initialize/Stop NIC command bit definitions */
+#define QLC_REGISTER_LB_IDC BIT_0
+#define QLC_REGISTER_DCB_AEN BIT_1
+#define QLC_83XX_MULTI_TENANCY_INFO BIT_29
+#define QLC_INIT_FW_RESOURCES BIT_31
+
+/* 83xx functions */
+int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *);
+int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *);
+void qlcnic_83xx_get_func_no(struct qlcnic_adapter *);
+int qlcnic_83xx_cam_lock(struct qlcnic_adapter *);
+void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *);
+int qlcnic_send_ctrl_op(struct qlcnic_adapter *, struct qlcnic_cmd_args *, u32);
+void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *);
+void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *);
+void qlcnic_83xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+void qlcnic_83xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *, ulong, int *);
+int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
+int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
+int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16);
+int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
+int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
+void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);
+
+int qlcnic_83xx_napi_add(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_83xx_napi_del(struct qlcnic_adapter *);
+void qlcnic_83xx_napi_enable(struct qlcnic_adapter *);
+void qlcnic_83xx_napi_disable(struct qlcnic_adapter *);
+int qlcnic_83xx_config_led(struct qlcnic_adapter *, u32, u32);
+int qlcnic_ind_wr(struct qlcnic_adapter *, u32, u32);
+int qlcnic_ind_rd(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *);
+int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *, int);
+void qlcnic_83xx_del_rx_ctx(struct qlcnic_adapter *);
+void qlcnic_83xx_del_tx_ctx(struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
+int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
+int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *, int);
+void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *);
+int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *, bool);
+int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
+int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *, u8);
+int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *,
+ struct qlcnic_adapter *, u32);
+void qlcnic_free_mbx_args(struct qlcnic_cmd_args *);
+void qlcnic_set_npar_data(struct qlcnic_adapter *, const struct qlcnic_info *,
+ struct qlcnic_info *);
+int qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *,
+ struct ethtool_coalesce *);
+int qlcnic_83xx_set_rx_tx_intr_coal(struct qlcnic_adapter *);
+int qlcnic_83xx_get_port_info(struct qlcnic_adapter *);
+void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *);
+void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *);
+irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *);
+irqreturn_t qlcnic_83xx_intr(int, void *);
+irqreturn_t qlcnic_83xx_tmp_intr(int, void *);
+void qlcnic_83xx_check_vf(struct qlcnic_adapter *,
+ const struct pci_device_id *);
+int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *);
+int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *);
+void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *);
+void qlcnic_83xx_register_map(struct qlcnic_hardware_context *);
+void qlcnic_83xx_idc_aen_work(struct work_struct *);
+void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *, __be32, int);
+
+int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *, u32, u32 *, int);
+int qlcnic_83xx_flash_write32(struct qlcnic_adapter *, u32, u32 *);
+int qlcnic_83xx_lock_flash(struct qlcnic_adapter *);
+void qlcnic_83xx_unlock_flash(struct qlcnic_adapter *);
+int qlcnic_83xx_save_flash_status(struct qlcnic_adapter *);
+int qlcnic_83xx_restore_flash_status(struct qlcnic_adapter *, int);
+int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *);
+int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *);
+int qlcnic_83xx_flash_read32(struct qlcnic_adapter *, u32, u8 *, int);
+int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *,
+ u32, u8 *, int);
+int qlcnic_83xx_init(struct qlcnic_adapter *, int);
+int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *);
+void qlcnic_83xx_idc_poll_dev_state(struct work_struct *);
+void qlcnic_83xx_idc_exit(struct qlcnic_adapter *);
+void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_lock_driver(struct qlcnic_adapter *);
+void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *);
+int qlcnic_83xx_set_default_offload_settings(struct qlcnic_adapter *);
+int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *);
+int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *, int);
+int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
+int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *,
+ struct qlcnic_info *, u8);
+int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *);
+int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *, int, int *);
+
+void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
+void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
+int qlcnic_83xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
+int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
+void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *,
+ struct ethtool_pauseparam *);
+int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *,
+ struct ethtool_pauseparam *);
+int qlcnic_83xx_test_link(struct qlcnic_adapter *);
+int qlcnic_83xx_reg_test(struct qlcnic_adapter *);
+int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *);
+int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *);
+int qlcnic_83xx_loopback_test(struct net_device *, u8);
+int qlcnic_83xx_interrupt_test(struct net_device *);
+int qlcnic_83xx_set_led(struct net_device *, enum ethtool_phys_id_state);
+int qlcnic_83xx_flash_test(struct qlcnic_adapter *);
+int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *);
+int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *);
+void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *);
+void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *);
+int qlcnic_83xx_idc_init(struct qlcnic_adapter *);
+int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *);
+int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *);
+int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *);
+void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *);
+int qlcnic_83xx_aer_reset(struct qlcnic_adapter *);
+void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *);
+u32 qlcnic_83xx_get_saved_state(void *, u32);
+void qlcnic_83xx_set_saved_state(void *, u32, u32);
+void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *);
+u32 qlcnic_83xx_get_cap_size(void *, int);
+void qlcnic_83xx_set_sys_info(void *, int, u32);
+void qlcnic_83xx_store_cap_mask(void *, u32);
+int qlcnic_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
new file mode 100644
index 00000000000..f33559b7252
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -0,0 +1,2448 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic_sriov.h"
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
+
+/* Reset template definitions */
+#define QLC_83XX_RESTART_TEMPLATE_SIZE 0x2000
+#define QLC_83XX_RESET_TEMPLATE_ADDR 0x4F0000
+#define QLC_83XX_RESET_SEQ_VERSION 0x0101
+
+#define QLC_83XX_OPCODE_NOP 0x0000
+#define QLC_83XX_OPCODE_WRITE_LIST 0x0001
+#define QLC_83XX_OPCODE_READ_WRITE_LIST 0x0002
+#define QLC_83XX_OPCODE_POLL_LIST 0x0004
+#define QLC_83XX_OPCODE_POLL_WRITE_LIST 0x0008
+#define QLC_83XX_OPCODE_READ_MODIFY_WRITE 0x0010
+#define QLC_83XX_OPCODE_SEQ_PAUSE 0x0020
+#define QLC_83XX_OPCODE_SEQ_END 0x0040
+#define QLC_83XX_OPCODE_TMPL_END 0x0080
+#define QLC_83XX_OPCODE_POLL_READ_LIST 0x0100
+
+/* EPORT control registers */
+#define QLC_83XX_RESET_CONTROL 0x28084E50
+#define QLC_83XX_RESET_REG 0x28084E60
+#define QLC_83XX_RESET_PORT0 0x28084E70
+#define QLC_83XX_RESET_PORT1 0x28084E80
+#define QLC_83XX_RESET_PORT2 0x28084E90
+#define QLC_83XX_RESET_PORT3 0x28084EA0
+#define QLC_83XX_RESET_SRESHIM 0x28084EB0
+#define QLC_83XX_RESET_EPGSHIM 0x28084EC0
+#define QLC_83XX_RESET_ETHERPCS 0x28084ED0
+
+static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter);
+static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev);
+static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter);
+static int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev);
+static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *);
+static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *);
+
+/* Template header */
+struct qlc_83xx_reset_hdr {
+#if defined(__LITTLE_ENDIAN)
+ u16 version;
+ u16 signature;
+ u16 size;
+ u16 entries;
+ u16 hdr_size;
+ u16 checksum;
+ u16 init_offset;
+ u16 start_offset;
+#elif defined(__BIG_ENDIAN)
+ u16 signature;
+ u16 version;
+ u16 entries;
+ u16 size;
+ u16 checksum;
+ u16 hdr_size;
+ u16 start_offset;
+ u16 init_offset;
+#endif
+} __packed;
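+
+/*
+ * Note on the #ifdef'd field order above (and in the entry header below):
+ * the template is presumably stored on flash as little-endian 32-bit words,
+ * so on big-endian hosts each pair of u16 members is reversed within its
+ * 32-bit word to keep the same byte offsets after a word-wise read. For
+ * example, the little-endian word { version, signature } is declared as
+ * { signature, version } in the big-endian variant.
+ */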
+
+/* Command entry header. */
+struct qlc_83xx_entry_hdr {
+#if defined(__LITTLE_ENDIAN)
+ u16 cmd;
+ u16 size;
+ u16 count;
+ u16 delay;
+#elif defined(__BIG_ENDIAN)
+ u16 size;
+ u16 cmd;
+ u16 delay;
+ u16 count;
+#endif
+} __packed;
+
+/* Generic poll command */
+struct qlc_83xx_poll {
+ u32 mask;
+ u32 status;
+} __packed;
+
+/* Read modify write command */
+struct qlc_83xx_rmw {
+ u32 mask;
+ u32 xor_value;
+ u32 or_value;
+#if defined(__LITTLE_ENDIAN)
+ u8 shl;
+ u8 shr;
+ u8 index_a;
+ u8 rsvd;
+#elif defined(__BIG_ENDIAN)
+ u8 rsvd;
+ u8 index_a;
+ u8 shr;
+ u8 shl;
+#endif
+} __packed;
+
+/* Generic command with 2 DWORD */
+struct qlc_83xx_entry {
+ u32 arg1;
+ u32 arg2;
+} __packed;
+
+/* Generic command with 4 DWORD */
+struct qlc_83xx_quad_entry {
+ u32 dr_addr;
+ u32 dr_value;
+ u32 ar_addr;
+ u32 ar_value;
+} __packed;
+
+static const char *const qlc_83xx_idc_states[] = {
+ "Unknown",
+ "Cold",
+ "Init",
+ "Ready",
+ "Need Reset",
+ "Need Quiesce",
+ "Failed",
+ "Quiesce"
+};
+
+static int
+qlcnic_83xx_idc_check_driver_presence_reg(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+ if ((val & 0xFFFF))
+ return 1;
+ else
+ return 0;
+}
+
+static void qlcnic_83xx_idc_log_state_history(struct qlcnic_adapter *adapter)
+{
+ u32 cur, prev;
+ cur = adapter->ahw->idc.curr_state;
+ prev = adapter->ahw->idc.prev_state;
+
+ dev_info(&adapter->pdev->dev,
+ "current state = %s, prev state = %s\n",
+ adapter->ahw->idc.name[cur],
+ adapter->ahw->idc.name[prev]);
+}
+
+static int qlcnic_83xx_idc_update_audit_reg(struct qlcnic_adapter *adapter,
+ u8 mode, int lock)
+{
+ u32 val;
+ int seconds;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT);
+ val |= (adapter->portnum & 0xf);
+ val |= mode << 7;
+ if (mode)
+ seconds = jiffies / HZ - adapter->ahw->idc.sec_counter;
+ else
+ seconds = jiffies / HZ;
+
+ val |= seconds << 8;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT, val);
+ adapter->ahw->idc.sec_counter = jiffies / HZ;
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static void qlcnic_83xx_idc_update_minor_version(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MIN_VERSION);
+ val = val & ~(0x3 << (adapter->portnum * 2));
+ val = val | (QLC_83XX_IDC_MINOR_VERSION << (adapter->portnum * 2));
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_MIN_VERSION, val);
+}
+
+static int qlcnic_83xx_idc_update_major_version(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ u32 val;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION);
+ val = val & ~0xFF;
+ val = val | QLC_83XX_IDC_MAJOR_VERSION;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION, val);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int
+qlcnic_83xx_idc_update_drv_presence_reg(struct qlcnic_adapter *adapter,
+ int status, int lock)
+{
+ u32 val;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+
+ if (status)
+ val = val | (1 << adapter->portnum);
+ else
+ val = val & ~(1 << adapter->portnum);
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val);
+ qlcnic_83xx_idc_update_minor_version(adapter);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_check_major_version(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ u8 version;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION);
+ version = val & 0xFF;
+
+ if (version != QLC_83XX_IDC_MAJOR_VERSION) {
+ dev_info(&adapter->pdev->dev,
+ "%s:mismatch. version 0x%x, expected version 0x%x\n",
+ __func__, version, QLC_83XX_IDC_MAJOR_VERSION);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_clear_registers(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ u32 val;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_ACK, 0);
+ /* Clear graceful reset bit */
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val &= ~QLC_83XX_IDC_GRACEFULL_RESET;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_update_drv_ack_reg(struct qlcnic_adapter *adapter,
+ int flag, int lock)
+{
+ u32 val;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_ACK);
+ if (flag)
+ val = val | (1 << adapter->portnum);
+ else
+ val = val & ~(1 << adapter->portnum);
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_ACK, val);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_check_timeout(struct qlcnic_adapter *adapter,
+ int time_limit)
+{
+ u64 seconds;
+
+ seconds = jiffies / HZ - adapter->ahw->idc.sec_counter;
+ if (seconds <= time_limit)
+ return 0;
+ else
+ return -EBUSY;
+}
+
+/**
+ * qlcnic_83xx_idc_check_reset_ack_reg
+ *
+ * @adapter: adapter structure
+ *
+ * Check ACK wait limit and clear the functions which failed to ACK
+ *
+ * Return 0 if all functions have acknowledged the reset request.
+ **/
+static int qlcnic_83xx_idc_check_reset_ack_reg(struct qlcnic_adapter *adapter)
+{
+ int timeout;
+ u32 ack, presence, val;
+
+ timeout = QLC_83XX_IDC_RESET_TIMEOUT_SECS;
+ ack = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_ACK);
+ presence = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+ dev_info(&adapter->pdev->dev,
+ "%s: ack = 0x%x, presence = 0x%x\n", __func__, ack, presence);
+ if (!((ack & presence) == presence)) {
+ if (qlcnic_83xx_idc_check_timeout(adapter, timeout)) {
+ /* Clear functions which failed to ACK */
+ dev_info(&adapter->pdev->dev,
+ "%s: ACK wait exceeds time limit\n", __func__);
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+ val = val & ~(ack ^ presence);
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val);
+ dev_info(&adapter->pdev->dev,
+ "%s: updated drv presence reg = 0x%x\n",
+ __func__, val);
+ qlcnic_83xx_unlock_driver(adapter);
+ return 0;
+
+ } else {
+ return 1;
+ }
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "%s: Reset ACK received from all functions\n",
+ __func__);
+ return 0;
+ }
+}
+
+/**
+ * qlcnic_83xx_idc_tx_soft_reset
+ *
+ * @adapter: adapter structure
+ *
+ * Handle context deletion and recreation request from transmit routine
+ *
+ * Returns -EBUSY or Success (0)
+ *
+ **/
+static int qlcnic_83xx_idc_tx_soft_reset(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ netif_device_detach(netdev);
+ qlcnic_down(adapter, netdev);
+ qlcnic_up(adapter, netdev);
+ netif_device_attach(netdev);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ netdev_info(adapter->netdev, "%s: soft reset complete.\n", __func__);
+
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_detach_driver
+ *
+ * @adapter: adapter structure
+ * Detach net interface, stop TX and clean up resources before the HW reset.
+ * Returns: None
+ *
+ **/
+static void qlcnic_83xx_idc_detach_driver(struct qlcnic_adapter *adapter)
+{
+ int i;
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+ qlcnic_83xx_detach_mailbox_work(adapter);
+
+ /* Disable mailbox interrupt */
+ qlcnic_83xx_disable_mbx_intr(adapter);
+ qlcnic_down(adapter, netdev);
+ for (i = 0; i < adapter->ahw->num_msix; i++) {
+ adapter->ahw->intr_tbl[i].id = i;
+ adapter->ahw->intr_tbl[i].enabled = 0;
+ adapter->ahw->intr_tbl[i].src = 0;
+ }
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_sriov_pf_reset(adapter);
+}
+
+/**
+ * qlcnic_83xx_idc_attach_driver
+ *
+ * @adapter: adapter structure
+ *
+ * Re-attach and re-enable net interface
+ * Returns: None
+ *
+ **/
+static void qlcnic_83xx_idc_attach_driver(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (netif_running(netdev)) {
+ if (qlcnic_up(adapter, netdev))
+ goto done;
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
+done:
+ netif_device_attach(netdev);
+}
+
+static int qlcnic_83xx_idc_enter_failed_state(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ qlcnic_83xx_idc_clear_registers(adapter, 0);
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_FAILED);
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ qlcnic_83xx_idc_log_state_history(adapter);
+ dev_info(&adapter->pdev->dev, "Device will enter failed state\n");
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_enter_init_state(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_INIT);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_enter_need_quiesce(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE,
+ QLC_83XX_IDC_DEV_NEED_QUISCENT);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int
+qlcnic_83xx_idc_enter_need_reset_state(struct qlcnic_adapter *adapter, int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE,
+ QLC_83XX_IDC_DEV_NEED_RESET);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_enter_ready_state(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_READY);
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_find_reset_owner_id
+ *
+ * @adapter: adapter structure
+ *
+ * NIC gets precedence over ISCSI and ISCSI has precedence over FCOE.
+ * Within the same class, function with lowest PCI ID assumes ownership
+ *
+ * Returns: reset owner id or failure indication (-EIO)
+ *
+ **/
+static int qlcnic_83xx_idc_find_reset_owner_id(struct qlcnic_adapter *adapter)
+{
+ u32 reg, reg1, reg2, i, j, owner, class;
+
+ reg1 = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_PARTITION_INFO_1);
+ reg2 = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_PARTITION_INFO_2);
+ owner = QLCNIC_TYPE_NIC;
+ i = 0;
+ j = 0;
+ reg = reg1;
+
+ do {
+ class = (((reg & (0xF << j * 4)) >> j * 4) & 0x3);
+ if (class == owner)
+ break;
+ if (i == (QLC_83XX_IDC_MAX_FUNC_PER_PARTITION_INFO - 1)) {
+ reg = reg2;
+ j = 0;
+ } else {
+ j++;
+ }
+
+ if (i == (QLC_83XX_IDC_MAX_CNA_FUNCTIONS - 1)) {
+ if (owner == QLCNIC_TYPE_NIC)
+ owner = QLCNIC_TYPE_ISCSI;
+ else if (owner == QLCNIC_TYPE_ISCSI)
+ owner = QLCNIC_TYPE_FCOE;
+ else if (owner == QLCNIC_TYPE_FCOE)
+ return -EIO;
+ reg = reg1;
+ j = 0;
+ i = 0;
+ }
+ } while (i++ < QLC_83XX_IDC_MAX_CNA_FUNCTIONS);
+
+ return i;
+}
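+
+/*
+ * Illustrative note on the partition info decoding above: each function is
+ * assumed to occupy a 4-bit field in the PARTITION_INFO registers, with the
+ * low two bits of that field carrying the personality class, roughly
+ *
+ *   class = (reg >> (j * 4)) & 0x3;
+ *
+ * The loop walks those fields NIC-first, then iSCSI, then FCoE (switching
+ * from the first to the second partition-info register as it goes) and
+ * returns the index of the first matching function.
+ */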
+
+static int qlcnic_83xx_idc_restart_hw(struct qlcnic_adapter *adapter, int lock)
+{
+ int ret = 0;
+
+ ret = qlcnic_83xx_restart_hw(adapter);
+
+ if (ret) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, lock);
+ } else {
+ qlcnic_83xx_idc_clear_registers(adapter, lock);
+ ret = qlcnic_83xx_idc_enter_ready_state(adapter, lock);
+ }
+
+ return ret;
+}
+
+static int qlcnic_83xx_idc_check_fan_failure(struct qlcnic_adapter *adapter)
+{
+ u32 status;
+
+ status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1);
+
+ if (status & QLCNIC_RCODE_FATAL_ERROR) {
+ dev_err(&adapter->pdev->dev,
+ "peg halt status1=0x%x\n", status);
+ if (QLCNIC_FWERROR_CODE(status) == QLCNIC_FWERROR_FAN_FAILURE) {
+ dev_err(&adapter->pdev->dev,
+ "On board active cooling fan failed. "
+ "Device has been halted.\n");
+ dev_err(&adapter->pdev->dev,
+ "Replace the adapter.\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
+
+ qlcnic_83xx_initialize_nic(adapter, 1);
+
+ err = qlcnic_sriov_pf_reinit(adapter);
+ if (err)
+ return err;
+
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
+
+ if (qlcnic_83xx_configure_opmode(adapter)) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return -EIO;
+ }
+
+ if (adapter->nic_ops->init_driver(adapter)) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return -EIO;
+ }
+
+ if (adapter->portnum == 0)
+ qlcnic_set_drv_version(adapter);
+
+ qlcnic_dcb_get_info(adapter->dcb);
+ qlcnic_83xx_idc_attach_driver(adapter);
+
+ return 0;
+}
+
+static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1);
+ qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+ set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
+
+ ahw->idc.quiesce_req = 0;
+ ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
+ ahw->idc.err_code = 0;
+ ahw->idc.collect_dump = 0;
+ ahw->reset_context = 0;
+ adapter->tx_timeo_cnt = 0;
+ ahw->idc.delay_reset = 0;
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+}
+
+/**
+ * qlcnic_83xx_idc_ready_state_entry
+ *
+ * @adapter: adapter structure
+ *
+ * Perform ready state initialization; this routine will be invoked only
+ * once from the READY state.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY) {
+ qlcnic_83xx_idc_update_idc_params(adapter);
+ /* Re-attach the device if required */
+ if ((ahw->idc.prev_state == QLC_83XX_IDC_DEV_NEED_RESET) ||
+ (ahw->idc.prev_state == QLC_83XX_IDC_DEV_INIT)) {
+ if (qlcnic_83xx_idc_reattach_driver(adapter))
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_vnic_pf_entry
+ *
+ * @adapter: adapter structure
+ *
+ * Ensure that a vNIC mode privileged function starts only after vNIC mode
+ * is enabled by the management function.
+ * If vNIC mode is ready, start initialization.
+ *
+ * Returns: -EIO or 0
+ *
+ **/
+int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *adapter)
+{
+ u32 state;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ /* Privileged function waits till mgmt function enables VNIC mode */
+ state = QLCRDX(adapter->ahw, QLC_83XX_VNIC_STATE);
+ if (state != QLCNIC_DEV_NPAR_OPER) {
+ if (!ahw->idc.vnic_wait_limit--) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return -EIO;
+ }
+ dev_info(&adapter->pdev->dev, "vNIC mode disabled\n");
+ return -EIO;
+
+ } else {
+ /* Perform one time initialization from ready state */
+ if (ahw->idc.vnic_state != QLCNIC_DEV_NPAR_OPER) {
+ qlcnic_83xx_idc_update_idc_params(adapter);
+
+ /* If the previous state is UNKNOWN, the device will already
+ have been attached properly by the init routine */
+ if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_UNKNOWN) {
+ if (qlcnic_83xx_idc_reattach_driver(adapter))
+ return -EIO;
+ }
+ adapter->ahw->idc.vnic_state = QLCNIC_DEV_NPAR_OPER;
+ dev_info(&adapter->pdev->dev, "vNIC mode enabled\n");
+ }
+ }
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_unknown_state(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->idc.err_code = -EIO;
+ dev_err(&adapter->pdev->dev,
+ "%s: Device in unknown state\n", __func__);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_cold_state
+ *
+ * @adapter: adapter structure
+ *
+ * If the HW is up and running, the device will enter the READY state.
+ * If a firmware image from the host needs to be loaded, the device is
+ * forced to start with the file firmware image.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_cold_state_handler(struct qlcnic_adapter *adapter)
+{
+ qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 0);
+ qlcnic_83xx_idc_update_audit_reg(adapter, 1, 0);
+
+ if (qlcnic_load_fw_file) {
+ qlcnic_83xx_idc_restart_hw(adapter, 0);
+ } else {
+ if (qlcnic_83xx_check_hw_status(adapter)) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, 0);
+ return -EIO;
+ } else {
+ qlcnic_83xx_idc_enter_ready_state(adapter, 0);
+ }
+ }
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_init_state
+ *
+ * @adapter: adapter structure
+ *
+ * The reset owner will restart the device from this state.
+ * The device will enter the failed state if it remains
+ * in this state for more than the DEV_INIT time limit.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_init_state(struct qlcnic_adapter *adapter)
+{
+ int timeout, ret = 0;
+ u32 owner;
+
+ timeout = QLC_83XX_IDC_INIT_TIMEOUT_SECS;
+ if (adapter->ahw->idc.prev_state == QLC_83XX_IDC_DEV_NEED_RESET) {
+ owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+ if (adapter->ahw->pci_func == owner)
+ ret = qlcnic_83xx_idc_restart_hw(adapter, 1);
+ } else {
+ ret = qlcnic_83xx_idc_check_timeout(adapter, timeout);
+ }
+
+ return ret;
+}
+
+/**
+ * qlcnic_83xx_idc_ready_state
+ *
+ * @adapter: adapter structure
+ *
+ * Perform IDC protocol specified actions after monitoring device state and
+ * events.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
+ int ret = 0;
+ u32 val;
+
+ /* Perform NIC configuration based ready state entry actions */
+ if (ahw->idc.state_entry(adapter))
+ return -EIO;
+
+ if (qlcnic_check_temp(adapter)) {
+ if (ahw->temp == QLCNIC_TEMP_PANIC) {
+ qlcnic_83xx_idc_check_fan_failure(adapter);
+ dev_err(&adapter->pdev->dev,
+ "Error: device temperature %d above limits\n",
+ adapter->ahw->temp);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ qlcnic_83xx_idc_detach_driver(adapter);
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return -EIO;
+ }
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ ret = qlcnic_83xx_check_heartbeat(adapter);
+ if (ret) {
+ adapter->flags |= QLCNIC_FW_HANG;
+ if (!(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
+ } else {
+ netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+ __func__);
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ }
+ return -EIO;
+ }
+
+ if ((val & QLC_83XX_IDC_GRACEFULL_RESET) || ahw->idc.collect_dump) {
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+
+ /* Move to need reset state and prepare for reset */
+ qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
+ return ret;
+ }
+
+ /* Check for soft reset request */
+ if (ahw->reset_context &&
+ !(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
+ adapter->ahw->reset_context = 0;
+ qlcnic_83xx_idc_tx_soft_reset(adapter);
+ return ret;
+ }
+
+ /* Move to need quiesce state if requested */
+ if (adapter->ahw->idc.quiesce_req) {
+ qlcnic_83xx_idc_enter_need_quiesce(adapter, 1);
+ qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * qlcnic_83xx_idc_need_reset_state
+ *
+ * @adapter: adapter structure
+ *
+ * Device will remain in this state until:
+ * Reset request ACKs are received from all the functions, or
+ * the wait time exceeds the max time limit
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+ int ret = 0;
+
+ if (adapter->ahw->idc.prev_state != QLC_83XX_IDC_DEV_NEED_RESET) {
+ qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ if (adapter->ahw->nic_mode == QLCNIC_VNIC_MODE)
+ qlcnic_83xx_disable_vnic_mode(adapter, 1);
+
+ if (qlcnic_check_diag_status(adapter)) {
+ dev_info(&adapter->pdev->dev,
+ "%s: Wait for diag completion\n", __func__);
+ adapter->ahw->idc.delay_reset = 1;
+ return 0;
+ } else {
+ qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1);
+ qlcnic_83xx_idc_detach_driver(adapter);
+ }
+ }
+
+ if (qlcnic_check_diag_status(adapter)) {
+ dev_info(&adapter->pdev->dev,
+ "%s: Wait for diag completion\n", __func__);
+ return -1;
+ } else {
+ if (adapter->ahw->idc.delay_reset) {
+ qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1);
+ qlcnic_83xx_idc_detach_driver(adapter);
+ adapter->ahw->idc.delay_reset = 0;
+ }
+
+ /* Check for ACK from other functions */
+ ret = qlcnic_83xx_idc_check_reset_ack_reg(adapter);
+ if (ret) {
+ dev_info(&adapter->pdev->dev,
+ "%s: Waiting for reset ACK\n", __func__);
+ return -1;
+ }
+ }
+
+ /* Transit to INIT state and restart the HW */
+ qlcnic_83xx_idc_enter_init_state(adapter, 1);
+
+ return ret;
+}
+
+static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter)
+{
+ dev_err(&adapter->pdev->dev, "%s: TBD\n", __func__);
+ return 0;
+}
+
+static void qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 val, owner;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+ owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+ if (ahw->pci_func == owner) {
+ qlcnic_83xx_stop_hw(adapter);
+ qlcnic_dump_fw(adapter);
+ }
+ }
+
+ netdev_warn(adapter->netdev, "%s: Reboot will be required to recover the adapter!!\n",
+ __func__);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ ahw->idc.err_code = -EIO;
+
+ return;
+}
+
+static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter)
+{
+ dev_info(&adapter->pdev->dev, "%s: TBD\n", __func__);
+ return 0;
+}
+
+static int qlcnic_83xx_idc_check_state_validity(struct qlcnic_adapter *adapter,
+ u32 state)
+{
+ u32 cur, prev, next;
+
+ cur = adapter->ahw->idc.curr_state;
+ prev = adapter->ahw->idc.prev_state;
+ next = state;
+
+ if ((next < QLC_83XX_IDC_DEV_COLD) ||
+ (next > QLC_83XX_IDC_DEV_QUISCENT)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: curr %d, prev %d, next state %d is invalid\n",
+ __func__, cur, prev, state);
+ return 1;
+ }
+
+ if ((cur == QLC_83XX_IDC_DEV_UNKNOWN) &&
+ (prev == QLC_83XX_IDC_DEV_UNKNOWN)) {
+ if ((next != QLC_83XX_IDC_DEV_COLD) &&
+ (next != QLC_83XX_IDC_DEV_READY)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed, cur %d prev %d next %d\n",
+ __func__, cur, prev, next);
+ return 1;
+ }
+ }
+
+ if (next == QLC_83XX_IDC_DEV_INIT) {
+ if ((prev != QLC_83XX_IDC_DEV_INIT) &&
+ (prev != QLC_83XX_IDC_DEV_COLD) &&
+ (prev != QLC_83XX_IDC_DEV_NEED_RESET)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed, cur %d prev %d next %d\n",
+ __func__, cur, prev, next);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_QLCNIC_VXLAN
+#define QLC_83XX_ENCAP_TYPE_VXLAN BIT_1
+#define QLC_83XX_MATCH_ENCAP_ID BIT_2
+#define QLC_83XX_SET_VXLAN_UDP_DPORT BIT_3
+#define QLC_83XX_VXLAN_UDP_DPORT(PORT) ((PORT & 0xffff) << 16)
+
+#define QLCNIC_ENABLE_INGRESS_ENCAP_PARSING 1
+#define QLCNIC_DISABLE_INGRESS_ENCAP_PARSING 0
+
+static int qlcnic_set_vxlan_port(struct qlcnic_adapter *adapter)
+{
+ u16 port = adapter->ahw->vxlan_port;
+ struct qlcnic_cmd_args cmd;
+ int ret = 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ ret = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_INIT_NIC_FUNC);
+ if (ret)
+ return ret;
+
+ cmd.req.arg[1] = QLC_83XX_MULTI_TENANCY_INFO;
+ cmd.req.arg[2] = QLC_83XX_ENCAP_TYPE_VXLAN |
+ QLC_83XX_SET_VXLAN_UDP_DPORT |
+ QLC_83XX_VXLAN_UDP_DPORT(port);
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret)
+ netdev_err(adapter->netdev,
+ "Failed to set VXLAN port %d in adapter\n",
+ port);
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return ret;
+}
+
+static int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter,
+ bool state)
+{
+ u16 vxlan_port = adapter->ahw->vxlan_port;
+ struct qlcnic_cmd_args cmd;
+ int ret = 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ ret = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_SET_INGRESS_ENCAP);
+ if (ret)
+ return ret;
+
+ cmd.req.arg[1] = state ? QLCNIC_ENABLE_INGRESS_ENCAP_PARSING :
+ QLCNIC_DISABLE_INGRESS_ENCAP_PARSING;
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret)
+ netdev_err(adapter->netdev,
+ "Failed to %s VXLAN parsing for port %d\n",
+ state ? "enable" : "disable", vxlan_port);
+ else
+ netdev_info(adapter->netdev,
+ "%s VXLAN parsing for port %d\n",
+ state ? "Enabled" : "Disabled", vxlan_port);
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return ret;
+}
+#endif
+
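+/*
+ * The periodic worker below consumes the VXLAN offload requests queued via
+ * QLCNIC_ADD_VXLAN_PORT / QLCNIC_DEL_VXLAN_PORT (assumed to be set by the
+ * netdev VXLAN port notifications elsewhere in the driver). On add, the UDP
+ * port is programmed first and ingress parsing is enabled only afterwards;
+ * on delete, parsing is disabled before the cached port is cleared,
+ * presumably so the firmware never parses against a stale port number.
+ */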
+static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter)
+{
+ if (adapter->fhash.fnum)
+ qlcnic_prune_lb_filters(adapter);
+
+#ifdef CONFIG_QLCNIC_VXLAN
+ if (adapter->flags & QLCNIC_ADD_VXLAN_PORT) {
+ if (qlcnic_set_vxlan_port(adapter))
+ return;
+
+ if (qlcnic_set_vxlan_parsing(adapter, true))
+ return;
+
+ adapter->flags &= ~QLCNIC_ADD_VXLAN_PORT;
+ } else if (adapter->flags & QLCNIC_DEL_VXLAN_PORT) {
+ if (qlcnic_set_vxlan_parsing(adapter, false))
+ return;
+
+ adapter->ahw->vxlan_port = 0;
+ adapter->flags &= ~QLCNIC_DEL_VXLAN_PORT;
+ }
+#endif
+}
+
+/**
+ * qlcnic_83xx_idc_poll_dev_state
+ *
+ * @work: kernel work queue structure used to schedule the function
+ *
+ * Poll device state periodically and perform state specific
+ * actions defined by Inter Driver Communication (IDC) protocol.
+ *
+ * Returns: None
+ *
+ **/
+void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter;
+ u32 state;
+
+ adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
+ state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+
+ if (qlcnic_83xx_idc_check_state_validity(adapter, state)) {
+ qlcnic_83xx_idc_log_state_history(adapter);
+ adapter->ahw->idc.curr_state = QLC_83XX_IDC_DEV_UNKNOWN;
+ } else {
+ adapter->ahw->idc.curr_state = state;
+ }
+
+ switch (adapter->ahw->idc.curr_state) {
+ case QLC_83XX_IDC_DEV_READY:
+ qlcnic_83xx_idc_ready_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_NEED_RESET:
+ qlcnic_83xx_idc_need_reset_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_NEED_QUISCENT:
+ qlcnic_83xx_idc_need_quiesce_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_FAILED:
+ qlcnic_83xx_idc_failed_state(adapter);
+ return;
+ case QLC_83XX_IDC_DEV_INIT:
+ qlcnic_83xx_idc_init_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_QUISCENT:
+ qlcnic_83xx_idc_quiesce_state(adapter);
+ break;
+ default:
+ qlcnic_83xx_idc_unknown_state(adapter);
+ return;
+ }
+ adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state;
+ qlcnic_83xx_periodic_tasks(adapter);
+
+ /* Re-schedule the function */
+ if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status))
+ qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
+ adapter->ahw->idc.delay);
+}
+
+static void qlcnic_83xx_setup_idc_parameters(struct qlcnic_adapter *adapter)
+{
+ u32 idc_params, val;
+
+ if (qlcnic_83xx_lockless_flash_read32(adapter,
+ QLC_83XX_IDC_FLASH_PARAM_ADDR,
+ (u8 *)&idc_params, 1)) {
+ dev_info(&adapter->pdev->dev,
+ "%s:failed to get IDC params from flash\n", __func__);
+ adapter->dev_init_timeo = QLC_83XX_IDC_INIT_TIMEOUT_SECS;
+ adapter->reset_ack_timeo = QLC_83XX_IDC_RESET_TIMEOUT_SECS;
+ } else {
+ adapter->dev_init_timeo = idc_params & 0xFFFF;
+ adapter->reset_ack_timeo = ((idc_params >> 16) & 0xFFFF);
+ }
+
+ adapter->ahw->idc.curr_state = QLC_83XX_IDC_DEV_UNKNOWN;
+ adapter->ahw->idc.prev_state = QLC_83XX_IDC_DEV_UNKNOWN;
+ adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
+ adapter->ahw->idc.err_code = 0;
+ adapter->ahw->idc.collect_dump = 0;
+ adapter->ahw->idc.name = (char **)qlc_83xx_idc_states;
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
+
+ /* Check if reset recovery is disabled */
+ if (!qlcnic_auto_fw_reset) {
+ /* Propagate do not reset request to other functions */
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val = val | QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+ }
+}
+
+static int
+qlcnic_83xx_idc_first_to_load_function_handler(struct qlcnic_adapter *adapter)
+{
+ u32 state, val;
+
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EIO;
+
+ /* Clear driver lock register */
+ QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, 0);
+ if (qlcnic_83xx_idc_update_major_version(adapter, 0)) {
+ qlcnic_83xx_unlock_driver(adapter);
+ return -EIO;
+ }
+
+ state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+ if (qlcnic_83xx_idc_check_state_validity(adapter, state)) {
+ qlcnic_83xx_unlock_driver(adapter);
+ return -EIO;
+ }
+
+ if (state != QLC_83XX_IDC_DEV_COLD && qlcnic_load_fw_file) {
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE,
+ QLC_83XX_IDC_DEV_COLD);
+ state = QLC_83XX_IDC_DEV_COLD;
+ }
+
+ adapter->ahw->idc.curr_state = state;
+ /* First to load function should cold boot the device */
+ if (state == QLC_83XX_IDC_DEV_COLD)
+ qlcnic_83xx_idc_cold_state_handler(adapter);
+
+ /* Check if reset recovery is enabled */
+ if (qlcnic_auto_fw_reset) {
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val = val & ~QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+ }
+
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+int qlcnic_83xx_idc_init(struct qlcnic_adapter *adapter)
+{
+ int ret = -EIO;
+
+ qlcnic_83xx_setup_idc_parameters(adapter);
+
+ if (qlcnic_83xx_get_reset_instruction_template(adapter))
+ return ret;
+
+ if (!qlcnic_83xx_idc_check_driver_presence_reg(adapter)) {
+ if (qlcnic_83xx_idc_first_to_load_function_handler(adapter))
+ return -EIO;
+ } else {
+ if (qlcnic_83xx_idc_check_major_version(adapter))
+ return -EIO;
+ }
+
+ qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+
+ return 0;
+}
+
+void qlcnic_83xx_idc_exit(struct qlcnic_adapter *adapter)
+{
+ int id;
+ u32 val;
+
+ while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ usleep_range(10000, 11000);
+
+ id = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ id = id & 0xFF;
+
+ if (id == adapter->portnum) {
+ dev_err(&adapter->pdev->dev,
+ "%s: wait for lock recovery.. %d\n", __func__, id);
+ msleep(20);
+ id = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ id = id & 0xFF;
+ }
+
+ /* Clear driver presence bit */
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+ val = val & ~(1 << adapter->portnum);
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val);
+ clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ cancel_delayed_work_sync(&adapter->fw_work);
+}
+
+void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key)
+{
+ u32 val;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ return;
+
+ if (qlcnic_83xx_lock_driver(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "%s:failed, please retry\n", __func__);
+ return;
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+ netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+ __func__);
+ qlcnic_83xx_idc_enter_failed_state(adapter, 0);
+ qlcnic_83xx_unlock_driver(adapter);
+ return;
+ }
+
+ if (key == QLCNIC_FORCE_FW_RESET) {
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val = val | QLC_83XX_IDC_GRACEFULL_RESET;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+ } else if (key == QLCNIC_FORCE_FW_DUMP_KEY) {
+ adapter->ahw->idc.collect_dump = 1;
+ }
+
+ qlcnic_83xx_unlock_driver(adapter);
+ return;
+}
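+
+/*
+ * Note: the request recorded above is consumed asynchronously. On the next
+ * IDC poll, qlcnic_83xx_idc_ready_state() sees QLC_83XX_IDC_GRACEFULL_RESET
+ * or idc.collect_dump and moves the device to the NEED_RESET state, so a
+ * QLCNIC_FORCE_FW_RESET or QLCNIC_FORCE_FW_DUMP_KEY request takes effect on
+ * the following poll cycle rather than immediately.
+ */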
+
+static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
+{
+ u8 *p_cache;
+ u32 src, size;
+ u64 dest;
+ int ret = -EIO;
+
+ src = QLC_83XX_BOOTLOADER_FLASH_ADDR;
+ dest = QLCRDX(adapter->ahw, QLCNIC_BOOTLOADER_ADDR);
+ size = QLCRDX(adapter->ahw, QLCNIC_BOOTLOADER_SIZE);
+
+ /* alignment check */
+ if (size & 0xF)
+ size = (size + 16) & ~0xF;
+
+ p_cache = vzalloc(size);
+ if (p_cache == NULL)
+ return -ENOMEM;
+
+ ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache,
+ size / sizeof(u32));
+ if (ret) {
+ vfree(p_cache);
+ return ret;
+ }
+ /* 16 byte write to MS memory */
+ ret = qlcnic_ms_mem_write128(adapter, dest, (u32 *)p_cache,
+ size / 16);
+ if (ret) {
+ vfree(p_cache);
+ return ret;
+ }
+ vfree(p_cache);
+
+ return ret;
+}
+
+static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
+{
+ struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
+ const struct firmware *fw = fw_info->fw;
+ u32 dest, *p_cache;
+ int i, ret = -EIO;
+ u8 data[16];
+ size_t size;
+ u64 addr;
+
+ dest = QLCRDX(adapter->ahw, QLCNIC_FW_IMAGE_ADDR);
+ size = (fw->size & ~0xF);
+ p_cache = (u32 *)fw->data;
+ addr = (u64)dest;
+
+ ret = qlcnic_ms_mem_write128(adapter, addr,
+ p_cache, size / 16);
+ if (ret) {
+ dev_err(&adapter->pdev->dev, "MS memory write failed\n");
+ release_firmware(fw);
+ fw_info->fw = NULL;
+ return -EIO;
+ }
+
+ /* alignment check */
+ if (fw->size & 0xF) {
+ addr = dest + size;
+ for (i = 0; i < (fw->size & 0xF); i++)
+ data[i] = fw->data[size + i];
+ for (; i < 16; i++)
+ data[i] = 0;
+ ret = qlcnic_ms_mem_write128(adapter, addr,
+ (u32 *)data, 1);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "MS memory write failed\n");
+ release_firmware(fw);
+ fw_info->fw = NULL;
+ return -EIO;
+ }
+ }
+ release_firmware(fw);
+ fw_info->fw = NULL;
+
+ return 0;
+}
+
+static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
+{
+ int i, j;
+ u32 val = 0, val1 = 0, reg = 0;
+ int err = 0;
+
+ val = QLCRD32(adapter, QLC_83XX_SRE_SHIM_REG, &err);
+ if (err == -EIO)
+ return;
+ dev_info(&adapter->pdev->dev, "SRE-Shim Ctrl:0x%x\n", val);
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0) {
+ dev_info(&adapter->pdev->dev,
+ "Port 0 RxB Pause Threshold Regs[TC7..TC0]:");
+ reg = QLC_83XX_PORT0_THRESHOLD;
+ } else if (j == 1) {
+ dev_info(&adapter->pdev->dev,
+ "Port 1 RxB Pause Threshold Regs[TC7..TC0]:");
+ reg = QLC_83XX_PORT1_THRESHOLD;
+ }
+ for (i = 0; i < 8; i++) {
+ val = QLCRD32(adapter, reg + (i * 0x4), &err);
+ if (err == -EIO)
+ return;
+ dev_info(&adapter->pdev->dev, "0x%x ", val);
+ }
+ dev_info(&adapter->pdev->dev, "\n");
+ }
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0) {
+ dev_info(&adapter->pdev->dev,
+ "Port 0 RxB TC Max Cell Registers[4..1]:");
+ reg = QLC_83XX_PORT0_TC_MC_REG;
+ } else if (j == 1) {
+ dev_info(&adapter->pdev->dev,
+ "Port 1 RxB TC Max Cell Registers[4..1]:");
+ reg = QLC_83XX_PORT1_TC_MC_REG;
+ }
+ for (i = 0; i < 4; i++) {
+ val = QLCRD32(adapter, reg + (i * 0x4), &err);
+ if (err == -EIO)
+ return;
+ dev_info(&adapter->pdev->dev, "0x%x ", val);
+ }
+ dev_info(&adapter->pdev->dev, "\n");
+ }
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0) {
+ dev_info(&adapter->pdev->dev,
+ "Port 0 RxB Rx TC Stats[TC7..TC0]:");
+ reg = QLC_83XX_PORT0_TC_STATS;
+ } else if (j == 1) {
+ dev_info(&adapter->pdev->dev,
+ "Port 1 RxB Rx TC Stats[TC7..TC0]:");
+ reg = QLC_83XX_PORT1_TC_STATS;
+ }
+ for (i = 7; i >= 0; i--) {
+ val = QLCRD32(adapter, reg, &err);
+ if (err == -EIO)
+ return;
+ val &= ~(0x7 << 29); /* Reset bits 29 to 31 */
+ QLCWR32(adapter, reg, (val | (i << 29)));
+ val = QLCRD32(adapter, reg, &err);
+ if (err == -EIO)
+ return;
+ dev_info(&adapter->pdev->dev, "0x%x ", val);
+ }
+ dev_info(&adapter->pdev->dev, "\n");
+ }
+
+ val = QLCRD32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD, &err);
+ if (err == -EIO)
+ return;
+ val1 = QLCRD32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD, &err);
+ if (err == -EIO)
+ return;
+ dev_info(&adapter->pdev->dev,
+ "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n",
+ val, val1);
+}
+
+static void qlcnic_83xx_disable_pause_frames(struct qlcnic_adapter *adapter)
+{
+ u32 reg = 0, i, j;
+
+ if (qlcnic_83xx_lock_driver(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "%s:failed to acquire driver lock\n", __func__);
+ return;
+ }
+
+ qlcnic_83xx_dump_pause_control_regs(adapter);
+ QLCWR32(adapter, QLC_83XX_SRE_SHIM_REG, 0x0);
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0)
+ reg = QLC_83XX_PORT0_THRESHOLD;
+ else if (j == 1)
+ reg = QLC_83XX_PORT1_THRESHOLD;
+
+ for (i = 0; i < 8; i++)
+ QLCWR32(adapter, reg + (i * 0x4), 0x0);
+ }
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0)
+ reg = QLC_83XX_PORT0_TC_MC_REG;
+ else if (j == 1)
+ reg = QLC_83XX_PORT1_TC_MC_REG;
+
+ for (i = 0; i < 4; i++)
+ QLCWR32(adapter, reg + (i * 0x4), 0x03FF03FF);
+ }
+
+ QLCWR32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD, 0);
+ QLCWR32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD, 0);
+ dev_info(&adapter->pdev->dev,
+ "Disabled pause frames successfully on all ports\n");
+ qlcnic_83xx_unlock_driver(adapter);
+}
+
+static void qlcnic_83xx_take_eport_out_of_reset(struct qlcnic_adapter *adapter)
+{
+ QLCWR32(adapter, QLC_83XX_RESET_REG, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_PORT0, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_PORT1, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_PORT2, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_PORT3, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_SRESHIM, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_EPGSHIM, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_ETHERPCS, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_CONTROL, 1);
+}
+
+static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev)
+{
+ u32 heartbeat, peg_status;
+ int retries, ret = -EIO, err = 0;
+
+ retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
+ p_dev->heartbeat = QLC_SHARED_REG_RD32(p_dev,
+ QLCNIC_PEG_ALIVE_COUNTER);
+
+ do {
+ msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS);
+ heartbeat = QLC_SHARED_REG_RD32(p_dev,
+ QLCNIC_PEG_ALIVE_COUNTER);
+ if (heartbeat != p_dev->heartbeat) {
+ ret = QLCNIC_RCODE_SUCCESS;
+ break;
+ }
+ } while (--retries);
+
+ if (ret) {
+ dev_err(&p_dev->pdev->dev, "firmware hang detected\n");
+ qlcnic_83xx_take_eport_out_of_reset(p_dev);
+ qlcnic_83xx_disable_pause_frames(p_dev);
+ peg_status = QLC_SHARED_REG_RD32(p_dev,
+ QLCNIC_PEG_HALT_STATUS1);
+ dev_info(&p_dev->pdev->dev, "Dumping HW/FW registers\n"
+ "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
+ "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
+ "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
+ "PEG_NET_4_PC: 0x%x\n", peg_status,
+ QLC_SHARED_REG_RD32(p_dev, QLCNIC_PEG_HALT_STATUS2),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_0, &err),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_1, &err),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_2, &err),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_3, &err),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_4, &err));
+
+ if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
+ dev_err(&p_dev->pdev->dev,
+ "Device is being reset err code 0x00006700.\n");
+ }
+
+ return ret;
+}
+
+static int qlcnic_83xx_check_cmd_peg_status(struct qlcnic_adapter *p_dev)
+{
+ int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT;
+ u32 val;
+
+ do {
+ val = QLC_SHARED_REG_RD32(p_dev, QLCNIC_CMDPEG_STATE);
+ if (val == QLC_83XX_CMDPEG_COMPLETE)
+ return 0;
+ msleep(QLCNIC_CMDPEG_CHECK_DELAY);
+ } while (--retries);
+
+ dev_err(&p_dev->pdev->dev, "%s: failed, state = 0x%x\n", __func__, val);
+ return -EIO;
+}
+
+static int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev)
+{
+ int err;
+
+ err = qlcnic_83xx_check_cmd_peg_status(p_dev);
+ if (err)
+ return err;
+
+ err = qlcnic_83xx_check_heartbeat(p_dev);
+ if (err)
+ return err;
+
+ return err;
+}
+
+static int qlcnic_83xx_poll_reg(struct qlcnic_adapter *p_dev, u32 addr,
+ int duration, u32 mask, u32 status)
+{
+ int timeout_error, err = 0;
+ u32 value;
+ u8 retries;
+
+ value = QLCRD32(p_dev, addr, &err);
+ if (err == -EIO)
+ return err;
+ retries = duration / 10;
+
+ do {
+ if ((value & mask) != status) {
+ timeout_error = 1;
+ msleep(duration / 10);
+ value = QLCRD32(p_dev, addr, &err);
+ if (err == -EIO)
+ return err;
+ } else {
+ timeout_error = 0;
+ break;
+ }
+ } while (retries--);
+
+ if (timeout_error) {
+ p_dev->ahw->reset.seq_error++;
+ dev_err(&p_dev->pdev->dev,
+ "%s: Timeout Err, entry_num = %d\n",
+ __func__, p_dev->ahw->reset.seq_index);
+ dev_err(&p_dev->pdev->dev,
+ "0x%08x 0x%08x 0x%08x\n",
+ value, mask, status);
+ }
+
+ return timeout_error;
+}
+
+static int qlcnic_83xx_reset_template_checksum(struct qlcnic_adapter *p_dev)
+{
+ u32 sum = 0;
+ u16 *buff = (u16 *)p_dev->ahw->reset.buff;
+ int count = p_dev->ahw->reset.hdr->size / sizeof(u16);
+
+ while (count-- > 0)
+ sum += *buff++;
+
+ while (sum >> 16)
+ sum = (sum & 0xFFFF) + (sum >> 16);
+
+ if (~sum) {
+ return 0;
+ } else {
+ dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
+ return -1;
+ }
+}
+
+static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev)
+{
+ struct qlcnic_hardware_context *ahw = p_dev->ahw;
+ u32 addr, count, prev_ver, curr_ver;
+ u8 *p_buff;
+
+ if (ahw->reset.buff != NULL) {
+ prev_ver = p_dev->fw_version;
+ curr_ver = qlcnic_83xx_get_fw_version(p_dev);
+ if (curr_ver > prev_ver)
+ kfree(ahw->reset.buff);
+ else
+ return 0;
+ }
+
+ ahw->reset.seq_error = 0;
+ ahw->reset.buff = kzalloc(QLC_83XX_RESTART_TEMPLATE_SIZE, GFP_KERNEL);
+ if (p_dev->ahw->reset.buff == NULL)
+ return -ENOMEM;
+
+ p_buff = p_dev->ahw->reset.buff;
+ addr = QLC_83XX_RESET_TEMPLATE_ADDR;
+ count = sizeof(struct qlc_83xx_reset_hdr) / sizeof(u32);
+
+ /* Copy template header from flash */
+ if (qlcnic_83xx_flash_read32(p_dev, addr, p_buff, count)) {
+ dev_err(&p_dev->pdev->dev, "%s: flash read failed\n", __func__);
+ return -EIO;
+ }
+ ahw->reset.hdr = (struct qlc_83xx_reset_hdr *)ahw->reset.buff;
+ addr = QLC_83XX_RESET_TEMPLATE_ADDR + ahw->reset.hdr->hdr_size;
+ p_buff = ahw->reset.buff + ahw->reset.hdr->hdr_size;
+ count = (ahw->reset.hdr->size - ahw->reset.hdr->hdr_size) / sizeof(u32);
+
+ /* Copy rest of the template */
+ if (qlcnic_83xx_flash_read32(p_dev, addr, p_buff, count)) {
+ dev_err(&p_dev->pdev->dev, "%s: flash read failed\n", __func__);
+ return -EIO;
+ }
+
+ if (qlcnic_83xx_reset_template_checksum(p_dev))
+ return -EIO;
+ /* Get Stop, Start and Init command offsets */
+ ahw->reset.init_offset = ahw->reset.buff + ahw->reset.hdr->init_offset;
+ ahw->reset.start_offset = ahw->reset.buff +
+ ahw->reset.hdr->start_offset;
+ ahw->reset.stop_offset = ahw->reset.buff + ahw->reset.hdr->hdr_size;
+ return 0;
+}
+
+/* Read Write HW register command */
+static void qlcnic_83xx_read_write_crb_reg(struct qlcnic_adapter *p_dev,
+ u32 raddr, u32 waddr)
+{
+ int err = 0;
+ u32 value;
+
+ value = QLCRD32(p_dev, raddr, &err);
+ if (err == -EIO)
+ return;
+ qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value);
+}
+
+/* Read Modify Write HW register command */
+static void qlcnic_83xx_rmw_crb_reg(struct qlcnic_adapter *p_dev,
+ u32 raddr, u32 waddr,
+ struct qlc_83xx_rmw *p_rmw_hdr)
+{
+ int err = 0;
+ u32 value;
+
+ if (p_rmw_hdr->index_a) {
+ value = p_dev->ahw->reset.array[p_rmw_hdr->index_a];
+ } else {
+ value = QLCRD32(p_dev, raddr, &err);
+ if (err == -EIO)
+ return;
+ }
+
+ value &= p_rmw_hdr->mask;
+ value <<= p_rmw_hdr->shl;
+ value >>= p_rmw_hdr->shr;
+ value |= p_rmw_hdr->or_value;
+ value ^= p_rmw_hdr->xor_value;
+ qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value);
+}
+
+/* Write HW register command */
+static void qlcnic_83xx_write_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ int i;
+ struct qlc_83xx_entry *entry;
+
+ entry = (struct qlc_83xx_entry *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_wrt_reg_indirect(p_dev, entry->arg1,
+ entry->arg2);
+ if (p_hdr->delay)
+ udelay((u32)(p_hdr->delay));
+ }
+}
+
+/* Read and Write instruction */
+static void qlcnic_83xx_read_write_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ int i;
+ struct qlc_83xx_entry *entry;
+
+ entry = (struct qlc_83xx_entry *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_read_write_crb_reg(p_dev, entry->arg1,
+ entry->arg2);
+ if (p_hdr->delay)
+ udelay((u32)(p_hdr->delay));
+ }
+}
+
+/* Poll HW register command */
+static void qlcnic_83xx_poll_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ long delay;
+ struct qlc_83xx_entry *entry;
+ struct qlc_83xx_poll *poll;
+ int i, err = 0;
+ unsigned long arg1, arg2;
+
+ poll = (struct qlc_83xx_poll *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ entry = (struct qlc_83xx_entry *)((char *)poll +
+ sizeof(struct qlc_83xx_poll));
+ delay = (long)p_hdr->delay;
+
+ if (!delay) {
+ for (i = 0; i < p_hdr->count; i++, entry++)
+ qlcnic_83xx_poll_reg(p_dev, entry->arg1,
+ delay, poll->mask,
+ poll->status);
+ } else {
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ arg1 = entry->arg1;
+ arg2 = entry->arg2;
+ if (delay) {
+ if (qlcnic_83xx_poll_reg(p_dev,
+ arg1, delay,
+ poll->mask,
+ poll->status)){
+ QLCRD32(p_dev, arg1, &err);
+ if (err == -EIO)
+ return;
+ QLCRD32(p_dev, arg2, &err);
+ if (err == -EIO)
+ return;
+ }
+ }
+ }
+ }
+}
+
+/* Poll and write HW register command */
+static void qlcnic_83xx_poll_write_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ int i;
+ long delay;
+ struct qlc_83xx_quad_entry *entry;
+ struct qlc_83xx_poll *poll;
+
+ poll = (struct qlc_83xx_poll *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+ entry = (struct qlc_83xx_quad_entry *)((char *)poll +
+ sizeof(struct qlc_83xx_poll));
+ delay = (long)p_hdr->delay;
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_wrt_reg_indirect(p_dev, entry->dr_addr,
+ entry->dr_value);
+ qlcnic_83xx_wrt_reg_indirect(p_dev, entry->ar_addr,
+ entry->ar_value);
+ if (delay)
+ qlcnic_83xx_poll_reg(p_dev, entry->ar_addr, delay,
+ poll->mask, poll->status);
+ }
+}
+
+/* Read Modify Write register command */
+static void qlcnic_83xx_read_modify_write(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ int i;
+ struct qlc_83xx_entry *entry;
+ struct qlc_83xx_rmw *rmw_hdr;
+
+ rmw_hdr = (struct qlc_83xx_rmw *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ entry = (struct qlc_83xx_entry *)((char *)rmw_hdr +
+ sizeof(struct qlc_83xx_rmw));
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_rmw_crb_reg(p_dev, entry->arg1,
+ entry->arg2, rmw_hdr);
+ if (p_hdr->delay)
+ udelay((u32)(p_hdr->delay));
+ }
+}
+
+static void qlcnic_83xx_pause(struct qlc_83xx_entry_hdr *p_hdr)
+{
+ if (p_hdr->delay)
+ mdelay((u32)((long)p_hdr->delay));
+}
+
+/* Read and poll register command */
+static void qlcnic_83xx_poll_read_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ long delay;
+ int index, i, j, err;
+ struct qlc_83xx_quad_entry *entry;
+ struct qlc_83xx_poll *poll;
+ unsigned long addr;
+
+ poll = (struct qlc_83xx_poll *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ entry = (struct qlc_83xx_quad_entry *)((char *)poll +
+ sizeof(struct qlc_83xx_poll));
+ delay = (long)p_hdr->delay;
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_wrt_reg_indirect(p_dev, entry->ar_addr,
+ entry->ar_value);
+ if (delay) {
+ if (!qlcnic_83xx_poll_reg(p_dev, entry->ar_addr, delay,
+ poll->mask, poll->status)){
+ index = p_dev->ahw->reset.array_index;
+ addr = entry->dr_addr;
+ j = QLCRD32(p_dev, addr, &err);
+ if (err == -EIO)
+ return;
+
+ p_dev->ahw->reset.array[index++] = j;
+
+ if (index == QLC_83XX_MAX_RESET_SEQ_ENTRIES)
+ p_dev->ahw->reset.array_index = 1;
+ }
+ }
+ }
+}
+
+static inline void qlcnic_83xx_seq_end(struct qlcnic_adapter *p_dev)
+{
+ p_dev->ahw->reset.seq_end = 1;
+}
+
+static void qlcnic_83xx_template_end(struct qlcnic_adapter *p_dev)
+{
+ p_dev->ahw->reset.template_end = 1;
+ if (p_dev->ahw->reset.seq_error == 0)
+ dev_err(&p_dev->pdev->dev,
+ "HW restart process completed successfully.\n");
+ else
+ dev_err(&p_dev->pdev->dev,
+ "HW restart completed with timeout errors.\n");
+}
+
+/**
+* qlcnic_83xx_exec_template_cmd
+*
+* @p_dev: adapter structure
+* @p_buff: Pointer to instruction template
+*
+* Template provides instructions to stop, restart and initialize firmware.
+* These instructions are abstracted as a series of read, write and
+* poll operations on hardware registers. Register information and operation
+* specifics are not exposed to the driver. Driver reads the template from
+* flash and executes the instructions located at pre-defined offsets.
+*
+* Returns: None
+* */
+static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev,
+ char *p_buff)
+{
+ int index, entries;
+ struct qlc_83xx_entry_hdr *p_hdr;
+ char *entry = p_buff;
+
+ p_dev->ahw->reset.seq_end = 0;
+ p_dev->ahw->reset.template_end = 0;
+ entries = p_dev->ahw->reset.hdr->entries;
+ index = p_dev->ahw->reset.seq_index;
+
+ for (; (!p_dev->ahw->reset.seq_end) && (index < entries); index++) {
+ p_hdr = (struct qlc_83xx_entry_hdr *)entry;
+
+ switch (p_hdr->cmd) {
+ case QLC_83XX_OPCODE_NOP:
+ break;
+ case QLC_83XX_OPCODE_WRITE_LIST:
+ qlcnic_83xx_write_list(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_READ_WRITE_LIST:
+ qlcnic_83xx_read_write_list(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_POLL_LIST:
+ qlcnic_83xx_poll_list(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_POLL_WRITE_LIST:
+ qlcnic_83xx_poll_write_list(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_READ_MODIFY_WRITE:
+ qlcnic_83xx_read_modify_write(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_SEQ_PAUSE:
+ qlcnic_83xx_pause(p_hdr);
+ break;
+ case QLC_83XX_OPCODE_SEQ_END:
+ qlcnic_83xx_seq_end(p_dev);
+ break;
+ case QLC_83XX_OPCODE_TMPL_END:
+ qlcnic_83xx_template_end(p_dev);
+ break;
+ case QLC_83XX_OPCODE_POLL_READ_LIST:
+ qlcnic_83xx_poll_read_list(p_dev, p_hdr);
+ break;
+ default:
+ dev_err(&p_dev->pdev->dev,
+ "%s: Unknown opcode 0x%04x in template %d\n",
+ __func__, p_hdr->cmd, index);
+ break;
+ }
+ entry += p_hdr->size;
+ }
+ p_dev->ahw->reset.seq_index = index;
+}
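+
+/*
+ * The wrappers below run the reset template from the offsets cached by
+ * qlcnic_83xx_get_reset_instruction_template(): stop_offset is used to halt
+ * the hardware, init_offset to (re)initialize it and start_offset to boot
+ * the firmware. qlcnic_83xx_restart_hw() executes stop first, then init,
+ * copies the bootloader and firmware image, and finally runs start.
+ */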
+
+static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev)
+{
+ p_dev->ahw->reset.seq_index = 0;
+
+ qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.stop_offset);
+ if (p_dev->ahw->reset.seq_end != 1)
+ dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
+}
+
+static void qlcnic_83xx_start_hw(struct qlcnic_adapter *p_dev)
+{
+ qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.start_offset);
+ if (p_dev->ahw->reset.template_end != 1)
+ dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
+}
+
+static void qlcnic_83xx_init_hw(struct qlcnic_adapter *p_dev)
+{
+ qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.init_offset);
+ if (p_dev->ahw->reset.seq_end != 1)
+ dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
+}
+
+static int qlcnic_83xx_load_fw_image_from_host(struct qlcnic_adapter *adapter)
+{
+ struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
+ int err = -EIO;
+
+ if (request_firmware(&fw_info->fw, fw_info->fw_file_name,
+ &(adapter->pdev->dev))) {
+ dev_err(&adapter->pdev->dev,
+ "No file FW image, loading flash FW image.\n");
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
+ QLC_83XX_BOOT_FROM_FLASH);
+ } else {
+ if (qlcnic_83xx_copy_fw_file(adapter))
+ return err;
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
+ QLC_83XX_BOOT_FROM_FILE);
+ }
+
+ return 0;
+}
+
+static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ int err = -EIO;
+
+ qlcnic_83xx_stop_hw(adapter);
+
+ /* Collect FW register dump if required */
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ if (!(val & QLC_83XX_IDC_GRACEFULL_RESET))
+ qlcnic_dump_fw(adapter);
+
+ if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+ netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+ __func__);
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return err;
+ }
+
+ qlcnic_83xx_init_hw(adapter);
+
+ if (qlcnic_83xx_copy_bootloader(adapter))
+ return err;
+ /* Boot either flash image or firmware image from host file system */
+ if (qlcnic_load_fw_file) {
+ if (qlcnic_83xx_load_fw_image_from_host(adapter))
+ return err;
+ } else {
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
+ QLC_83XX_BOOT_FROM_FLASH);
+ }
+
+ qlcnic_83xx_start_hw(adapter);
+ if (qlcnic_83xx_check_hw_status(adapter))
+ return -EIO;
+
+ return 0;
+}
+
+static int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
+{
+ int err;
+ struct qlcnic_info nic_info;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
+ err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
+ if (err)
+ return -EIO;
+
+ ahw->physical_port = (u8) nic_info.phys_port;
+ ahw->switch_mode = nic_info.switch_mode;
+ ahw->max_tx_ques = nic_info.max_tx_ques;
+ ahw->max_rx_ques = nic_info.max_rx_ques;
+ ahw->capabilities = nic_info.capabilities;
+ ahw->max_mac_filters = nic_info.max_mac_filters;
+ ahw->max_mtu = nic_info.max_mtu;
+
+ /* eSwitch capability indicates vNIC mode.
+ * vNIC and SR-IOV are mutually exclusive operational modes.
+ * If SR-IOV capability is detected, SR-IOV physical function
+ * will get initialized in default mode.
+ * SR-IOV virtual function initialization follows a
+ * different code path and opmode.
+ * SR-IOV mode has precedence over vNIC mode.
+ */
+ if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state))
+ return QLC_83XX_DEFAULT_OPMODE;
+
+ if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
+ return QLCNIC_VNIC_MODE;
+
+ return QLC_83XX_DEFAULT_OPMODE;
+}
+
+int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u16 max_sds_rings, max_tx_rings;
+ int ret;
+
+ ret = qlcnic_83xx_get_nic_configuration(adapter);
+ if (ret == -EIO)
+ return -EIO;
+
+ if (ret == QLCNIC_VNIC_MODE) {
+ ahw->nic_mode = QLCNIC_VNIC_MODE;
+
+ if (qlcnic_83xx_config_vnic_opmode(adapter))
+ return -EIO;
+
+ max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS;
+ max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
+ } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
+ ahw->nic_mode = QLCNIC_DEFAULT_MODE;
+ adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
+ ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+ max_sds_rings = QLCNIC_MAX_SDS_RINGS;
+ max_tx_rings = QLCNIC_MAX_TX_RINGS;
+ } else {
+ dev_err(&adapter->pdev->dev, "%s: Invalid opmode %d\n",
+ __func__, ret);
+ return -EIO;
+ }
+
+ adapter->max_sds_rings = min(ahw->max_rx_ques, max_sds_rings);
+ adapter->max_tx_rings = min(ahw->max_tx_ques, max_tx_rings);
+
+ return 0;
+}
+
+static void qlcnic_83xx_config_buff_descriptors(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (ahw->port_type == QLCNIC_XGBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+
+ } else if (ahw->port_type == QLCNIC_GBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
+ }
+ adapter->num_txd = MAX_CMD_DESCRIPTORS;
+ adapter->max_rds_rings = MAX_RDS_RINGS;
+}
+
+static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter)
+{
+ int err = -EIO;
+
+ qlcnic_83xx_get_minidump_template(adapter);
+ if (qlcnic_83xx_get_port_info(adapter))
+ return err;
+
+ qlcnic_83xx_config_buff_descriptors(adapter);
+ adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+
+ dev_info(&adapter->pdev->dev, "HAL Version: %d\n",
+ adapter->ahw->fw_hal_version);
+
+ return 0;
+}
+
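+/* True when this function's presence bit is set in the IDC driver
+ * presence register, or bit 6 of the driver audit register is set.
+ */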
+#define IS_QLC_83XX_USED(a, b, c) (((1 << a->portnum) & b) || ((c >> 6) & 0x1))
+static void qlcnic_83xx_clear_function_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ u32 presence_mask, audit_mask;
+ int status;
+
+ presence_mask = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+ audit_mask = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT);
+
+ if (IS_QLC_83XX_USED(adapter, presence_mask, audit_mask)) {
+ status = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_STOP_NIC_FUNC);
+ if (status)
+ return;
+
+ cmd.req.arg[1] = BIT_31;
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status)
+ dev_err(&adapter->pdev->dev,
+ "Failed to clean up the function resources\n");
+ qlcnic_free_mbx_args(&cmd);
+ }
+}
+
+static int qlcnic_83xx_get_fw_info(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct pci_dev *pdev = adapter->pdev;
+ struct qlc_83xx_fw_info *fw_info;
+ int err = 0;
+
+ ahw->fw_info = kzalloc(sizeof(*fw_info), GFP_KERNEL);
+ if (!ahw->fw_info) {
+ err = -ENOMEM;
+ } else {
+ fw_info = ahw->fw_info;
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_QLOGIC_QLE834X:
+ strncpy(fw_info->fw_file_name, QLC_83XX_FW_FILE_NAME,
+ QLC_FW_FILE_NAME_LEN);
+ break;
+ case PCI_DEVICE_ID_QLOGIC_QLE844X:
+ strncpy(fw_info->fw_file_name, QLC_84XX_FW_FILE_NAME,
+ QLC_FW_FILE_NAME_LEN);
+ break;
+ default:
+ dev_err(&pdev->dev, "%s: Invalid device id\n",
+ __func__);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ return err;
+}
+
+static void qlcnic_83xx_init_rings(struct qlcnic_adapter *adapter)
+{
+ u8 rx_cnt = QLCNIC_DEF_SDS_RINGS;
+ u8 tx_cnt = QLCNIC_DEF_TX_RINGS;
+
+ adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS;
+ adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
+
+ if (!adapter->ahw->msix_supported) {
+ rx_cnt = QLCNIC_SINGLE_RING;
+ tx_cnt = QLCNIC_SINGLE_RING;
+ }
+
+	/* compute and set drv tx and sds ring counts */
+ qlcnic_set_tx_ring_count(adapter, tx_cnt);
+ qlcnic_set_sds_ring_count(adapter, rx_cnt);
+}
+
+int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err = 0;
+
+ adapter->rx_mac_learn = false;
+ ahw->msix_supported = !!qlcnic_use_msi_x;
+
+ qlcnic_83xx_init_rings(adapter);
+
+ err = qlcnic_83xx_init_mailbox_work(adapter);
+ if (err)
+ goto exit;
+
+ if (qlcnic_sriov_vf_check(adapter)) {
+ err = qlcnic_sriov_vf_init(adapter, pci_using_dac);
+ if (err)
+ goto detach_mbx;
+ else
+ return err;
+ }
+
+ if (qlcnic_83xx_read_flash_descriptor_table(adapter) ||
+ qlcnic_83xx_read_flash_mfg_id(adapter)) {
+ dev_err(&adapter->pdev->dev, "Failed reading flash mfg id\n");
+ err = -ENOTRECOVERABLE;
+ goto detach_mbx;
+ }
+
+ err = qlcnic_83xx_check_hw_status(adapter);
+ if (err)
+ goto detach_mbx;
+
+ err = qlcnic_83xx_get_fw_info(adapter);
+ if (err)
+ goto detach_mbx;
+
+ err = qlcnic_83xx_idc_init(adapter);
+ if (err)
+ goto detach_mbx;
+
+ err = qlcnic_setup_intr(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
+ goto disable_intr;
+ }
+
+ INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
+
+ err = qlcnic_83xx_setup_mbx_intr(adapter);
+ if (err)
+ goto disable_mbx_intr;
+
+ qlcnic_83xx_clear_function_resources(adapter);
+ qlcnic_dcb_enable(adapter->dcb);
+ qlcnic_83xx_initialize_nic(adapter, 1);
+ qlcnic_dcb_get_info(adapter->dcb);
+
+ /* Configure default, SR-IOV or Virtual NIC mode of operation */
+ err = qlcnic_83xx_configure_opmode(adapter);
+ if (err)
+ goto disable_mbx_intr;
+
+ /* Perform operating mode specific initialization */
+ err = adapter->nic_ops->init_driver(adapter);
+ if (err)
+ goto disable_mbx_intr;
+
+ /* Periodically monitor device status */
+ qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
+ return 0;
+
+disable_mbx_intr:
+ qlcnic_83xx_free_mbx_intr(adapter);
+
+disable_intr:
+ qlcnic_teardown_intr(adapter);
+
+detach_mbx:
+ qlcnic_83xx_detach_mailbox_work(adapter);
+ qlcnic_83xx_free_mailbox(ahw->mailbox);
+ ahw->mailbox = NULL;
+exit:
+ return err;
+}
+
+void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+
+ clear_bit(QLC_83XX_MBX_READY, &idc->status);
+ cancel_delayed_work_sync(&adapter->fw_work);
+
+ if (ahw->nic_mode == QLCNIC_VNIC_MODE)
+ qlcnic_83xx_disable_vnic_mode(adapter, 1);
+
+ qlcnic_83xx_idc_detach_driver(adapter);
+ qlcnic_83xx_initialize_nic(adapter, 0);
+
+ cancel_delayed_work_sync(&adapter->idc_aen_work);
+}
+
+int qlcnic_83xx_aer_reset(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+ int ret = 0;
+ u32 owner;
+
+ /* Mark the previous IDC state as NEED_RESET so
+ * that state_entry() will perform the reattachment
+	 * and bring up the device
+ */
+ idc->prev_state = QLC_83XX_IDC_DEV_NEED_RESET;
+ owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+ if (ahw->pci_func == owner) {
+ ret = qlcnic_83xx_restart_hw(adapter);
+ if (ret < 0)
+ return ret;
+ qlcnic_83xx_idc_clear_registers(adapter, 0);
+ }
+
+ ret = idc->state_entry(adapter);
+ return ret;
+}
+
+void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+ u32 owner;
+
+ idc->prev_state = QLC_83XX_IDC_DEV_READY;
+ owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+ if (ahw->pci_func == owner)
+ qlcnic_83xx_idc_enter_ready_state(adapter, 0);
+
+ qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, 0);
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
new file mode 100644
index 00000000000..be7d7a62cc0
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -0,0 +1,284 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
+
+static int qlcnic_83xx_enable_vnic_mode(struct qlcnic_adapter *adapter, int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+ QLCWRX(adapter->ahw, QLC_83XX_VNIC_STATE, QLCNIC_DEV_NPAR_OPER);
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *adapter, int lock)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_VNIC_STATE, QLCNIC_DEV_NPAR_NON_OPER);
+ ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER;
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *adapter)
+{
+ u8 id;
+ int ret = -EBUSY;
+ u32 data = QLCNIC_MGMT_FUNC;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (qlcnic_83xx_lock_driver(adapter))
+ return ret;
+
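+	/* Mark this PCI function as the management function in the
+	 * shared per-function opmode register.
+	 */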
+ id = ahw->pci_func;
+ data = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
+ data = (data & ~QLC_83XX_SET_FUNC_OPMODE(0x3, id)) |
+ QLC_83XX_SET_FUNC_OPMODE(QLCNIC_MGMT_FUNC, id);
+
+ QLCWRX(adapter->ahw, QLC_83XX_DRV_OP_MODE, data);
+
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static void
+qlcnic_83xx_config_vnic_buff_descriptors(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (ahw->port_type == QLCNIC_XGBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+
+ } else if (ahw->port_type == QLCNIC_GBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
+ }
+ adapter->num_txd = MAX_CMD_DESCRIPTORS;
+ adapter->max_rds_rings = MAX_RDS_RINGS;
+}
+
+/**
+ * qlcnic_83xx_init_mgmt_vnic
+ *
+ * @adapter: adapter structure
+ * The management virtual NIC sets the operational mode of the other vNICs
+ * and configures the embedded switch (eSwitch).
+ * Returns: 0 on success or an error code.
+ *
+ **/
+static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct device *dev = &adapter->pdev->dev;
+ struct qlcnic_npar_info *npar;
+ int i, err = -EIO;
+
+ qlcnic_83xx_get_minidump_template(adapter);
+
+ if (!(adapter->flags & QLCNIC_ADAPTER_INITIALIZED)) {
+ if (qlcnic_init_pci_info(adapter))
+ return err;
+
+ npar = adapter->npars;
+
+ for (i = 0; i < ahw->total_nic_func; i++, npar++) {
+ dev_info(dev, "id:%d active:%d type:%d port:%d min_bw:%d max_bw:%d mac_addr:%pM\n",
+ npar->pci_func, npar->active, npar->type,
+ npar->phy_port, npar->min_bw, npar->max_bw,
+ npar->mac);
+ }
+
+ dev_info(dev, "Max functions = %d, active functions = %d\n",
+ ahw->max_pci_func, ahw->total_nic_func);
+
+ if (qlcnic_83xx_set_vnic_opmode(adapter))
+ return err;
+
+ if (qlcnic_set_default_offload_settings(adapter))
+ return err;
+ } else {
+ if (qlcnic_reset_npar_config(adapter))
+ return err;
+ }
+
+ if (qlcnic_83xx_get_port_info(adapter))
+ return err;
+
+ qlcnic_83xx_config_vnic_buff_descriptors(adapter);
+ ahw->msix_supported = qlcnic_use_msi_x ? 1 : 0;
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+ qlcnic_83xx_enable_vnic_mode(adapter, 1);
+
+ dev_info(dev, "HAL Version: %d, Management function\n",
+ ahw->fw_hal_version);
+
+ return 0;
+}
+
+static int qlcnic_83xx_init_privileged_vnic(struct qlcnic_adapter *adapter)
+{
+ int err = -EIO;
+
+ qlcnic_83xx_get_minidump_template(adapter);
+ if (qlcnic_83xx_get_port_info(adapter))
+ return err;
+
+ qlcnic_83xx_config_vnic_buff_descriptors(adapter);
+ adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d, Privileged function\n",
+ adapter->ahw->fw_hal_version);
+ return 0;
+}
+
+static int qlcnic_83xx_init_non_privileged_vnic(struct qlcnic_adapter *adapter)
+{
+ int err = -EIO;
+
+ qlcnic_83xx_get_fw_version(adapter);
+ if (qlcnic_set_eswitch_port_config(adapter))
+ return err;
+
+ if (qlcnic_83xx_get_port_info(adapter))
+ return err;
+
+ qlcnic_83xx_config_vnic_buff_descriptors(adapter);
+ adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+
+ dev_info(&adapter->pdev->dev, "HAL Version: %d, Virtual function\n",
+ adapter->ahw->fw_hal_version);
+
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_config_vnic_opmode
+ *
+ * @adapter: adapter structure
+ * Identify virtual NIC operational modes.
+ *
+ * Returns: 0 on success or an error code.
+ *
+ **/
+int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
+{
+ u32 op_mode, priv_level;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_nic_template *nic_ops = adapter->nic_ops;
+
+ qlcnic_get_func_no(adapter);
+ op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
+
+ if (op_mode == QLC_83XX_DEFAULT_OPMODE)
+ priv_level = QLCNIC_MGMT_FUNC;
+ else
+ priv_level = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode,
+ ahw->pci_func);
+ switch (priv_level) {
+ case QLCNIC_NON_PRIV_FUNC:
+ ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
+ ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+ nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
+ break;
+ case QLCNIC_PRIV_FUNC:
+ ahw->op_mode = QLCNIC_PRIV_FUNC;
+ ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
+ nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
+ break;
+ case QLCNIC_MGMT_FUNC:
+ ahw->op_mode = QLCNIC_MGMT_FUNC;
+ ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+ nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
+ break;
+ default:
+ dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
+ return -EIO;
+ }
+
+ if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY) {
+ adapter->flags |= QLCNIC_ESWITCH_ENABLED;
+ if (adapter->drv_mac_learn)
+ adapter->rx_mac_learn = true;
+ } else {
+ adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
+ adapter->rx_mac_learn = false;
+ }
+
+ ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER;
+ ahw->idc.vnic_wait_limit = QLCNIC_DEV_NPAR_OPER_TIMEO;
+
+ return 0;
+}
+
+int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+ u32 state;
+
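+	/* Poll the vNIC state register once per second until it reports
+	 * operational or the wait limit expires.
+	 */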
+ state = QLCRDX(ahw, QLC_83XX_VNIC_STATE);
+ while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit--) {
+ msleep(1000);
+ state = QLCRDX(ahw, QLC_83XX_VNIC_STATE);
+ }
+
+ if (!idc->vnic_wait_limit) {
+ dev_err(&adapter->pdev->dev,
+ "vNIC mode not operational, state check timed out.\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *adapter,
+ int func, int *port_id)
+{
+ struct qlcnic_info nic_info;
+ int err = 0;
+
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
+
+ err = qlcnic_get_nic_info(adapter, &nic_info, func);
+ if (err)
+ return err;
+
+ if (nic_info.capabilities & QLC_83XX_ESWITCH_CAPABILITY)
+ *port_id = nic_info.phys_port;
+ else
+ err = -EIO;
+
+ if (!err)
+ adapter->eswitch[*port_id].flags |= QLCNIC_SWITCH_ENABLE;
+
+ return err;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index b14b8f0787e..304e247bdf3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -1,29 +1,101 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
#include "qlcnic.h"
-static int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
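+/* Each entry: {command, number of request args, number of response args} */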
+static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = {
+ {QLCNIC_CMD_CREATE_RX_CTX, 4, 1},
+ {QLCNIC_CMD_DESTROY_RX_CTX, 2, 1},
+ {QLCNIC_CMD_CREATE_TX_CTX, 4, 1},
+ {QLCNIC_CMD_DESTROY_TX_CTX, 2, 1},
+ {QLCNIC_CMD_INTRPT_TEST, 4, 1},
+ {QLCNIC_CMD_SET_MTU, 4, 1},
+ {QLCNIC_CMD_READ_PHY, 4, 2},
+ {QLCNIC_CMD_WRITE_PHY, 5, 1},
+ {QLCNIC_CMD_READ_HW_REG, 4, 1},
+ {QLCNIC_CMD_GET_FLOW_CTL, 4, 2},
+ {QLCNIC_CMD_SET_FLOW_CTL, 4, 1},
+ {QLCNIC_CMD_READ_MAX_MTU, 4, 2},
+ {QLCNIC_CMD_READ_MAX_LRO, 4, 2},
+ {QLCNIC_CMD_MAC_ADDRESS, 4, 3},
+ {QLCNIC_CMD_GET_PCI_INFO, 4, 1},
+ {QLCNIC_CMD_GET_NIC_INFO, 4, 1},
+ {QLCNIC_CMD_SET_NIC_INFO, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3},
+ {QLCNIC_CMD_TOGGLE_ESWITCH, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_STATUS, 4, 3},
+ {QLCNIC_CMD_SET_PORTMIRRORING, 4, 1},
+ {QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1},
+ {QLCNIC_CMD_GET_MAC_STATS, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3},
+ {QLCNIC_CMD_GET_ESWITCH_STATS, 5, 1},
+ {QLCNIC_CMD_CONFIG_PORT, 4, 1},
+ {QLCNIC_CMD_TEMP_SIZE, 4, 4},
+ {QLCNIC_CMD_GET_TEMP_HDR, 4, 1},
+ {QLCNIC_CMD_82XX_SET_DRV_VER, 4, 1},
+ {QLCNIC_CMD_GET_LED_STATUS, 4, 2},
+ {QLCNIC_CMD_MQ_TX_CONFIG_INTR, 2, 3},
+ {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
+ {QLCNIC_CMD_DCB_QUERY_PARAM, 4, 1},
+};
+
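+/* Command signature written with each command: PCI function in byte 0,
+ * FW HAL version in byte 1 and the constant 0xcafe in the upper 16 bits.
+ */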
+static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw)
{
- int i;
+ return (ahw->pci_func & 0xff) | ((ahw->fw_hal_version & 0xff) << 8) |
+ (0xcafe << 16);
+}
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
- if (adapter->npars[i].pci_func == pci_func)
- return i;
+/* Allocate mailbox registers */
+int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
+ struct qlcnic_adapter *adapter, u32 type)
+{
+ int i, size;
+ const struct qlcnic_mailbox_metadata *mbx_tbl;
+
+ mbx_tbl = qlcnic_mbx_tbl;
+ size = ARRAY_SIZE(qlcnic_mbx_tbl);
+ for (i = 0; i < size; i++) {
+ if (type == mbx_tbl[i].cmd) {
+ mbx->req.num = mbx_tbl[i].in_args;
+ mbx->rsp.num = mbx_tbl[i].out_args;
+ mbx->req.arg = kcalloc(mbx->req.num,
+ sizeof(u32), GFP_ATOMIC);
+ if (!mbx->req.arg)
+ return -ENOMEM;
+ mbx->rsp.arg = kcalloc(mbx->rsp.num,
+ sizeof(u32), GFP_ATOMIC);
+ if (!mbx->rsp.arg) {
+ kfree(mbx->req.arg);
+ mbx->req.arg = NULL;
+ return -ENOMEM;
+ }
+ memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
+ memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
+ mbx->req.arg[0] = type;
+ break;
+ }
}
+ return 0;
+}
- return -1;
+/* Free up mailbox registers */
+void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd)
+{
+ kfree(cmd->req.arg);
+ cmd->req.arg = NULL;
+ kfree(cmd->rsp.arg);
+ cmd->rsp.arg = NULL;
}
static u32
qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
{
u32 rsp;
- int timeout = 0;
+ int timeout = 0, err = 0;
do {
 		/* give at least 1ms for firmware to respond */
@@ -32,235 +104,169 @@ qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
return QLCNIC_CDRP_RSP_TIMEOUT;
- rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET);
+ rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET, &err);
} while (!QLCNIC_CDRP_IS_RSP(rsp));
return rsp;
}
-void
-qlcnic_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd)
+int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
{
+ int i, err = 0;
u32 rsp;
u32 signature;
struct pci_dev *pdev = adapter->pdev;
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ const char *fmt;
- signature = QLCNIC_CDRP_SIGNATURE_MAKE(ahw->pci_func,
- adapter->ahw->fw_hal_version);
+ signature = qlcnic_get_cmd_signature(ahw);
/* Acquire semaphore before accessing CRB */
if (qlcnic_api_lock(adapter)) {
- cmd->rsp.cmd = QLCNIC_RCODE_TIMEOUT;
- return;
+ cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
+ return cmd->rsp.arg[0];
}
QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
- QLCWR32(adapter, QLCNIC_ARG1_CRB_OFFSET, cmd->req.arg1);
- QLCWR32(adapter, QLCNIC_ARG2_CRB_OFFSET, cmd->req.arg2);
- QLCWR32(adapter, QLCNIC_ARG3_CRB_OFFSET, cmd->req.arg3);
+ for (i = 1; i < QLCNIC_CDRP_MAX_ARGS; i++)
+ QLCWR32(adapter, QLCNIC_CDRP_ARG(i), cmd->req.arg[i]);
QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET,
- QLCNIC_CDRP_FORM_CMD(cmd->req.cmd));
-
+ QLCNIC_CDRP_FORM_CMD(cmd->req.arg[0]));
rsp = qlcnic_poll_rsp(adapter);
if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
- dev_err(&pdev->dev, "CDRP response timeout.\n");
- cmd->rsp.cmd = QLCNIC_RCODE_TIMEOUT;
+ dev_err(&pdev->dev, "card response timeout.\n");
+ cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
} else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
- cmd->rsp.cmd = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
- switch (cmd->rsp.cmd) {
+ cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1), &err);
+ switch (cmd->rsp.arg[0]) {
case QLCNIC_RCODE_INVALID_ARGS:
- dev_err(&pdev->dev, "CDRP invalid args: 0x%x.\n",
- cmd->rsp.cmd);
+ fmt = "CDRP invalid args: [%d]\n";
break;
case QLCNIC_RCODE_NOT_SUPPORTED:
case QLCNIC_RCODE_NOT_IMPL:
- dev_err(&pdev->dev,
- "CDRP command not supported: 0x%x.\n",
- cmd->rsp.cmd);
+ fmt = "CDRP command not supported: [%d]\n";
break;
case QLCNIC_RCODE_NOT_PERMITTED:
- dev_err(&pdev->dev,
- "CDRP requested action not permitted: 0x%x.\n",
- cmd->rsp.cmd);
+ fmt = "CDRP requested action not permitted: [%d]\n";
break;
case QLCNIC_RCODE_INVALID:
- dev_err(&pdev->dev,
- "CDRP invalid or unknown cmd received: 0x%x.\n",
- cmd->rsp.cmd);
+ fmt = "CDRP invalid or unknown cmd received: [%d]\n";
break;
case QLCNIC_RCODE_TIMEOUT:
- dev_err(&pdev->dev, "CDRP command timeout: 0x%x.\n",
- cmd->rsp.cmd);
+ fmt = "CDRP command timeout: [%d]\n";
break;
default:
- dev_err(&pdev->dev, "CDRP command failed: 0x%x.\n",
- cmd->rsp.cmd);
+ fmt = "CDRP command failed: [%d]\n";
+ break;
}
- } else if (rsp == QLCNIC_CDRP_RSP_OK) {
- cmd->rsp.cmd = QLCNIC_RCODE_SUCCESS;
- if (cmd->rsp.arg2)
- cmd->rsp.arg2 = QLCRD32(adapter,
- QLCNIC_ARG2_CRB_OFFSET);
- if (cmd->rsp.arg3)
- cmd->rsp.arg3 = QLCRD32(adapter,
- QLCNIC_ARG3_CRB_OFFSET);
- }
- if (cmd->rsp.arg1)
- cmd->rsp.arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+ dev_err(&pdev->dev, fmt, cmd->rsp.arg[0]);
+ qlcnic_dump_mbx(adapter, cmd);
+ } else if (rsp == QLCNIC_CDRP_RSP_OK)
+ cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS;
+
+ for (i = 1; i < cmd->rsp.num; i++)
+ cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i), &err);
/* Release semaphore */
qlcnic_api_unlock(adapter);
-
+ return cmd->rsp.arg[0];
}
-static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u32 temp_size)
+int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter, u32 fw_cmd)
{
- uint64_t sum = 0;
- int count = temp_size / sizeof(uint32_t);
- while (count-- > 0)
- sum += *temp_buffer++;
- while (sum >> 32)
- sum = (sum & 0xFFFFFFFF) + (sum >> 32);
- return ~sum;
-}
-
-int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
-{
- int err, i;
- void *tmp_addr;
- u32 temp_size, version, csum, *template;
- __le32 *tmp_buf;
struct qlcnic_cmd_args cmd;
- struct qlcnic_hardware_context *ahw;
- struct qlcnic_dump_template_hdr *tmpl_hdr;
- dma_addr_t tmp_addr_t = 0;
-
- ahw = adapter->ahw;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_TEMP_SIZE;
- memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
- qlcnic_issue_cmd(adapter, &cmd);
- if (cmd.rsp.cmd != QLCNIC_RCODE_SUCCESS) {
- dev_info(&adapter->pdev->dev,
- "Can't get template size %d\n", cmd.rsp.cmd);
- err = -EIO;
+ u32 arg1, arg2, arg3;
+ char drv_string[12];
+ int err = 0;
+
+ memset(drv_string, 0, sizeof(drv_string));
+ snprintf(drv_string, sizeof(drv_string), "%d"".""%d"".""%d",
+ _QLCNIC_LINUX_MAJOR, _QLCNIC_LINUX_MINOR,
+ _QLCNIC_LINUX_SUBVERSION);
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, fw_cmd);
+ if (err)
return err;
- }
- temp_size = cmd.rsp.arg2;
- version = cmd.rsp.arg3;
- dev_info(&adapter->pdev->dev,
- "minidump template version = 0x%x", version);
- if (!temp_size)
- return -EIO;
- tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
- &tmp_addr_t, GFP_KERNEL);
- if (!tmp_addr) {
- dev_err(&adapter->pdev->dev,
- "Can't get memory for FW dump template\n");
- return -ENOMEM;
- }
- memset(&cmd.rsp, 0, sizeof(struct _cdrp_cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_GET_TEMP_HDR;
- cmd.req.arg1 = LSD(tmp_addr_t);
- cmd.req.arg2 = MSD(tmp_addr_t);
- cmd.req.arg3 = temp_size;
- qlcnic_issue_cmd(adapter, &cmd);
-
- err = cmd.rsp.cmd;
- if (err != QLCNIC_RCODE_SUCCESS) {
- dev_err(&adapter->pdev->dev,
- "Failed to get mini dump template header %d\n", err);
- err = -EIO;
- goto error;
- }
- ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
- if (!ahw->fw_dump.tmpl_hdr) {
- err = -EIO;
- goto error;
- }
- tmp_buf = tmp_addr;
- template = (u32 *) ahw->fw_dump.tmpl_hdr;
- for (i = 0; i < temp_size/sizeof(u32); i++)
- *template++ = __le32_to_cpu(*tmp_buf++);
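+	/* Pack the NUL-padded "major.minor.sub" version string into three
+	 * 32-bit mailbox arguments.
+	 */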
+ memcpy(&arg1, drv_string, sizeof(u32));
+ memcpy(&arg2, drv_string + 4, sizeof(u32));
+ memcpy(&arg3, drv_string + 8, sizeof(u32));
- csum = qlcnic_temp_checksum((u32 *)ahw->fw_dump.tmpl_hdr, temp_size);
- if (csum) {
- dev_err(&adapter->pdev->dev,
- "Template header checksum validation failed\n");
+ cmd.req.arg[1] = arg1;
+ cmd.req.arg[2] = arg2;
+ cmd.req.arg[3] = arg3;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_info(&adapter->pdev->dev,
+ "Failed to set driver version in firmware\n");
err = -EIO;
- goto error;
}
-
- tmpl_hdr = ahw->fw_dump.tmpl_hdr;
- tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
- ahw->fw_dump.enable = 1;
-error:
- dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);
+ qlcnic_free_mbx_args(&cmd);
return err;
}
int
qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
{
+ int err = 0;
struct qlcnic_cmd_args cmd;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_SET_MTU;
- cmd.req.arg1 = recv_ctx->context_id;
- cmd.req.arg2 = mtu;
- cmd.req.arg3 = 0;
- if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
- qlcnic_issue_cmd(adapter, &cmd);
- if (cmd.rsp.cmd) {
- dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
- return -EIO;
- }
- }
+ if (recv_ctx->state != QLCNIC_HOST_CTX_STATE_ACTIVE)
+ return err;
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_MTU);
+ if (err)
+ return err;
- return 0;
+ cmd.req.arg[1] = recv_ctx->context_id;
+ cmd.req.arg[2] = mtu;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
+ err = -EIO;
+ }
+ qlcnic_free_mbx_args(&cmd);
+ return err;
}
-static int
-qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
+int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
{
- void *addr;
- struct qlcnic_hostrq_rx_ctx *prq;
- struct qlcnic_cardrsp_rx_ctx *prsp;
- struct qlcnic_hostrq_rds_ring *prq_rds;
- struct qlcnic_hostrq_sds_ring *prq_sds;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
+ struct net_device *netdev = adapter->netdev;
+ u32 temp_intr_crb_mode, temp_rds_crb_mode;
struct qlcnic_cardrsp_rds_ring *prsp_rds;
struct qlcnic_cardrsp_sds_ring *prsp_sds;
+ struct qlcnic_hostrq_rds_ring *prq_rds;
+ struct qlcnic_hostrq_sds_ring *prq_sds;
struct qlcnic_host_rds_ring *rds_ring;
struct qlcnic_host_sds_ring *sds_ring;
- struct qlcnic_cmd_args cmd;
-
- dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
- u64 phys_addr;
-
+ struct qlcnic_cardrsp_rx_ctx *prsp;
+ struct qlcnic_hostrq_rx_ctx *prq;
u8 i, nrds_rings, nsds_rings;
+ struct qlcnic_cmd_args cmd;
size_t rq_size, rsp_size;
u32 cap, reg, val, reg2;
+ u64 phys_addr;
+ u16 temp_u16;
+ void *addr;
int err;
- u16 temp;
-
- struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
nrds_rings = adapter->max_rds_rings;
- nsds_rings = adapter->max_sds_rings;
+ nsds_rings = adapter->drv_sds_rings;
- rq_size =
- SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
- nsds_rings);
- rsp_size =
- SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
- nsds_rings);
+ rq_size = SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
+ nsds_rings);
+ rsp_size = SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
+ nsds_rings);
addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
- &hostrq_phys_addr, GFP_KERNEL);
+ &hostrq_phys_addr, GFP_KERNEL);
if (addr == NULL)
return -ENOMEM;
prq = addr;
@@ -279,18 +285,20 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
| QLCNIC_CAP0_VALIDOFF);
cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
- if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
- cap |= QLCNIC_CAP0_LRO_MSS;
-
- temp = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
- prq->valid_field_offset = cpu_to_le16(temp);
- prq->txrx_sds_binding = nsds_rings - 1;
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test) {
+ cap |= QLCNIC_CAP0_TX_MULTI;
+ } else {
+ temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
+ prq->valid_field_offset = cpu_to_le16(temp_u16);
+ prq->txrx_sds_binding = nsds_rings - 1;
+ temp_intr_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
+ prq->host_int_crb_mode = cpu_to_le32(temp_intr_crb_mode);
+ temp_rds_crb_mode = QLCNIC_HOST_RDS_CRB_MODE_UNIQUE;
+ prq->host_rds_crb_mode = cpu_to_le32(temp_rds_crb_mode);
+ }
prq->capabilities[0] = cpu_to_le32(cap);
- prq->host_int_crb_mode =
- cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
- prq->host_rds_crb_mode =
- cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);
prq->num_rds_rings = cpu_to_le16(nrds_rings);
prq->num_sds_rings = cpu_to_le16(nsds_rings);
@@ -304,10 +312,8 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
le32_to_cpu(prq->rds_ring_offset));
for (i = 0; i < nrds_rings; i++) {
-
rds_ring = &recv_ctx->rds_rings[i];
rds_ring->producer = 0;
-
prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
prq_rds[i].ring_kind = cpu_to_le32(i);
@@ -318,39 +324,40 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
le32_to_cpu(prq->sds_ring_offset));
for (i = 0; i < nsds_rings; i++) {
-
sds_ring = &recv_ctx->sds_rings[i];
sds_ring->consumer = 0;
memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));
-
prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
- prq_sds[i].msi_index = cpu_to_le16(i);
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ prq_sds[i].msi_index = cpu_to_le16(ahw->intr_tbl[i].id);
+ else
+ prq_sds[i].msi_index = cpu_to_le16(i);
}
phys_addr = hostrq_phys_addr;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.arg1 = (u32) (phys_addr >> 32);
- cmd.req.arg2 = (u32) (phys_addr & 0xffffffff);
- cmd.req.arg3 = rq_size;
- cmd.req.cmd = QLCNIC_CDRP_CMD_CREATE_RX_CTX;
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX);
+ if (err)
+ goto out_free_rsp;
+
+ cmd.req.arg[1] = MSD(phys_addr);
+ cmd.req.arg[2] = LSD(phys_addr);
+ cmd.req.arg[3] = rq_size;
+ err = qlcnic_issue_cmd(adapter, &cmd);
if (err) {
dev_err(&adapter->pdev->dev,
"Failed to create rx ctx in firmware%d\n", err);
goto out_free_rsp;
}
-
prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
&prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);
for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
rds_ring = &recv_ctx->rds_rings[i];
-
reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
- rds_ring->crb_rcv_producer = adapter->ahw->pci_base0 + reg;
+ rds_ring->crb_rcv_producer = ahw->pci_base0 + reg;
}
prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
@@ -358,59 +365,70 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
sds_ring = &recv_ctx->sds_rings[i];
-
reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
- reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
+ if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
+ reg2 = ahw->intr_tbl[i].src;
+ else
+ reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
- sds_ring->crb_sts_consumer = adapter->ahw->pci_base0 + reg;
- sds_ring->crb_intr_mask = adapter->ahw->pci_base0 + reg2;
+ sds_ring->crb_intr_mask = ahw->pci_base0 + reg2;
+ sds_ring->crb_sts_consumer = ahw->pci_base0 + reg;
}
recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
recv_ctx->context_id = le16_to_cpu(prsp->context_id);
recv_ctx->virt_port = prsp->virt_port;
+ netdev_info(netdev, "Rx Context[%d] Created, state 0x%x\n",
+ recv_ctx->context_id, recv_ctx->state);
+ qlcnic_free_mbx_args(&cmd);
+
out_free_rsp:
dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
- cardrsp_phys_addr);
+ cardrsp_phys_addr);
out_free_rq:
dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
+
return err;
}
-static void
-qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
+void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter)
{
+ int err;
struct qlcnic_cmd_args cmd;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.arg1 = recv_ctx->context_id;
- cmd.req.arg2 = QLCNIC_DESTROY_CTX_RESET;
- cmd.req.arg3 = 0;
- cmd.req.cmd = QLCNIC_CDRP_CMD_DESTROY_RX_CTX;
- qlcnic_issue_cmd(adapter, &cmd);
- if (cmd.rsp.cmd)
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX);
+ if (err)
+ return;
+
+ cmd.req.arg[1] = recv_ctx->context_id;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
dev_err(&adapter->pdev->dev,
"Failed to destroy rx ctx in firmware\n");
recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
+ qlcnic_free_mbx_args(&cmd);
}
-static int
-qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
+int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring,
+ int ring)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct net_device *netdev = adapter->netdev;
struct qlcnic_hostrq_tx_ctx *prq;
struct qlcnic_hostrq_cds_ring *prq_cds;
struct qlcnic_cardrsp_tx_ctx *prsp;
- void *rq_addr, *rsp_addr;
- size_t rq_size, rsp_size;
- u32 temp;
struct qlcnic_cmd_args cmd;
- int err;
- u64 phys_addr;
- dma_addr_t rq_phys_addr, rsp_phys_addr;
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+ u32 temp, intr_mask, temp_int_crb_mode;
+ dma_addr_t rq_phys_addr, rsp_phys_addr;
+ int temp_nsds_rings, index, err;
+ void *rq_addr, *rsp_addr;
+ size_t rq_size, rsp_size;
+ u64 phys_addr;
+ u16 msix_id;
/* reset host resources */
tx_ring->producer = 0;
@@ -418,36 +436,44 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
*(tx_ring->hw_consumer) = 0;
rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
- rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
- &rq_phys_addr, GFP_KERNEL);
+ rq_addr = dma_zalloc_coherent(&adapter->pdev->dev, rq_size,
+ &rq_phys_addr, GFP_KERNEL);
if (!rq_addr)
return -ENOMEM;
rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
- rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
- &rsp_phys_addr, GFP_KERNEL);
+ rsp_addr = dma_zalloc_coherent(&adapter->pdev->dev, rsp_size,
+ &rsp_phys_addr, GFP_KERNEL);
if (!rsp_addr) {
err = -ENOMEM;
goto out_free_rq;
}
- memset(rq_addr, 0, rq_size);
prq = rq_addr;
-
- memset(rsp_addr, 0, rsp_size);
prsp = rsp_addr;
prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
- QLCNIC_CAP0_LSO);
+ QLCNIC_CAP0_LSO);
+ if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
+ temp |= QLCNIC_CAP0_TX_MULTI;
+
prq->capabilities[0] = cpu_to_le32(temp);
- prq->host_int_crb_mode =
- cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test) {
+ temp_nsds_rings = adapter->drv_sds_rings;
+ index = temp_nsds_rings + ring;
+ msix_id = ahw->intr_tbl[index].id;
+ prq->msi_index = cpu_to_le16(msix_id);
+ } else {
+ temp_int_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
+ prq->host_int_crb_mode = cpu_to_le32(temp_int_crb_mode);
+ prq->msi_index = 0;
+ }
prq->interrupt_ctl = 0;
- prq->msi_index = 0;
prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);
prq_cds = &prq->cds_ring;
@@ -456,105 +482,121 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
phys_addr = rq_phys_addr;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.arg1 = (u32)(phys_addr >> 32);
- cmd.req.arg2 = ((u32)phys_addr & 0xffffffff);
- cmd.req.arg3 = rq_size;
- cmd.req.cmd = QLCNIC_CDRP_CMD_CREATE_TX_CTX;
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
+ if (err)
+ goto out_free_rsp;
+
+ cmd.req.arg[1] = MSD(phys_addr);
+ cmd.req.arg[2] = LSD(phys_addr);
+ cmd.req.arg[3] = rq_size;
+ err = qlcnic_issue_cmd(adapter, &cmd);
if (err == QLCNIC_RCODE_SUCCESS) {
+ tx_ring->state = le32_to_cpu(prsp->host_ctx_state);
temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
+ tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ index = adapter->drv_sds_rings + ring;
+ intr_mask = ahw->intr_tbl[index].src;
+ tx_ring->crb_intr_mask = ahw->pci_base0 + intr_mask;
+ }
- adapter->tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
+ netdev_info(netdev, "Tx Context[0x%x] Created, state 0x%x\n",
+ tx_ring->ctx_id, tx_ring->state);
} else {
- dev_err(&adapter->pdev->dev,
- "Failed to create tx ctx in firmware%d\n", err);
+ netdev_err(netdev, "Failed to create tx ctx in firmware%d\n",
+ err);
err = -EIO;
}
+ qlcnic_free_mbx_args(&cmd);
+out_free_rsp:
dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
- rsp_phys_addr);
-
+ rsp_phys_addr);
out_free_rq:
dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);
return err;
}
-static void
-qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
+void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
{
struct qlcnic_cmd_args cmd;
+ int ret;
+
+ ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX);
+ if (ret)
+ return;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.arg1 = adapter->tx_ring->ctx_id;
- cmd.req.arg2 = QLCNIC_DESTROY_CTX_RESET;
- cmd.req.arg3 = 0;
- cmd.req.cmd = QLCNIC_CDRP_CMD_DESTROY_TX_CTX;
- qlcnic_issue_cmd(adapter, &cmd);
- if (cmd.rsp.cmd)
+ cmd.req.arg[1] = tx_ring->ctx_id;
+ if (qlcnic_issue_cmd(adapter, &cmd))
dev_err(&adapter->pdev->dev,
"Failed to destroy tx ctx in firmware\n");
+ qlcnic_free_mbx_args(&cmd);
}
int
qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config)
{
+ int err;
struct qlcnic_cmd_args cmd;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.arg1 = config;
- cmd.req.cmd = QLCNIC_CDRP_CMD_CONFIG_PORT;
- qlcnic_issue_cmd(adapter, &cmd);
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_PORT);
+ if (err)
+ return err;
- return cmd.rsp.cmd;
+ cmd.req.arg[1] = config;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ qlcnic_free_mbx_args(&cmd);
+ return err;
}
int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
{
void *addr;
- int err;
- int ring;
+ int err, ring;
struct qlcnic_recv_context *recv_ctx;
struct qlcnic_host_rds_ring *rds_ring;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_host_tx_ring *tx_ring;
+ __le32 *ptr;
struct pci_dev *pdev = adapter->pdev;
recv_ctx = adapter->recv_ctx;
- tx_ring = adapter->tx_ring;
-
- tx_ring->hw_consumer = (__le32 *) dma_alloc_coherent(&pdev->dev,
- sizeof(u32), &tx_ring->hw_cons_phys_addr, GFP_KERNEL);
- if (tx_ring->hw_consumer == NULL) {
- dev_err(&pdev->dev, "failed to allocate tx consumer\n");
- return -ENOMEM;
- }
- /* cmd desc ring */
- addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
- &tx_ring->phys_addr, GFP_KERNEL);
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32),
+ &tx_ring->hw_cons_phys_addr,
+ GFP_KERNEL);
+ if (ptr == NULL)
+ return -ENOMEM;
+
+ tx_ring->hw_consumer = ptr;
+ /* cmd desc ring */
+ addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
+ &tx_ring->phys_addr,
+ GFP_KERNEL);
+ if (addr == NULL) {
+ err = -ENOMEM;
+ goto err_out_free;
+ }
- if (addr == NULL) {
- dev_err(&pdev->dev, "failed to allocate tx desc ring\n");
- err = -ENOMEM;
- goto err_out_free;
+ tx_ring->desc_head = addr;
}
- tx_ring->desc_head = addr;
-
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
addr = dma_alloc_coherent(&adapter->pdev->dev,
- RCV_DESC_RINGSIZE(rds_ring),
- &rds_ring->phys_addr, GFP_KERNEL);
+ RCV_DESC_RINGSIZE(rds_ring),
+ &rds_ring->phys_addr, GFP_KERNEL);
if (addr == NULL) {
- dev_err(&pdev->dev,
- "failed to allocate rds ring [%d]\n", ring);
err = -ENOMEM;
goto err_out_free;
}
@@ -562,15 +604,13 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
}
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
addr = dma_alloc_coherent(&adapter->pdev->dev,
- STATUS_DESC_RINGSIZE(sds_ring),
- &sds_ring->phys_addr, GFP_KERNEL);
+ STATUS_DESC_RINGSIZE(sds_ring),
+ &sds_ring->phys_addr, GFP_KERNEL);
if (addr == NULL) {
- dev_err(&pdev->dev,
- "failed to allocate sds ring [%d]\n", ring);
err = -ENOMEM;
goto err_out_free;
}
@@ -584,36 +624,88 @@ err_out_free:
return err;
}
-
-int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter)
+int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
{
- int err;
+ int i, err, ring;
+
+ if (dev->flags & QLCNIC_NEED_FLR) {
+ pci_reset_function(dev->pdev);
+ dev->flags &= ~QLCNIC_NEED_FLR;
+ }
+
+ if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) {
+ if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST) {
+ err = qlcnic_83xx_config_intrpt(dev, 1);
+ if (err)
+ return err;
+ }
+ }
- if (adapter->flags & QLCNIC_NEED_FLR) {
- pci_reset_function(adapter->pdev);
- adapter->flags &= ~QLCNIC_NEED_FLR;
+ if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) &&
+ qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test) {
+ err = qlcnic_82xx_mq_intrpt(dev, 1);
+ if (err)
+ return err;
}
- err = qlcnic_fw_cmd_create_rx_ctx(adapter);
+ err = qlcnic_fw_cmd_create_rx_ctx(dev);
if (err)
- return err;
+ goto err_out;
- err = qlcnic_fw_cmd_create_tx_ctx(adapter);
- if (err) {
- qlcnic_fw_cmd_destroy_rx_ctx(adapter);
- return err;
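+	/* Create a Tx context for each driver Tx ring; on failure, delete
+	 * the Rx context and any Tx contexts created so far.
+	 */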
+ for (ring = 0; ring < dev->drv_tx_rings; ring++) {
+ err = qlcnic_fw_cmd_create_tx_ctx(dev,
+ &dev->tx_ring[ring],
+ ring);
+ if (err) {
+ qlcnic_fw_cmd_del_rx_ctx(dev);
+ if (ring == 0)
+ goto err_out;
+
+ for (i = 0; i < ring; i++)
+ qlcnic_fw_cmd_del_tx_ctx(dev, &dev->tx_ring[i]);
+
+ goto err_out;
+ }
}
- set_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
+ set_bit(__QLCNIC_FW_ATTACHED, &dev->state);
+
return 0;
+
+err_out:
+ if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) &&
+ qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test)
+ qlcnic_82xx_config_intrpt(dev, 0);
+
+ if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) {
+ if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ qlcnic_83xx_config_intrpt(dev, 0);
+ }
+
+ return err;
}
void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
{
- if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
- qlcnic_fw_cmd_destroy_rx_ctx(adapter);
- qlcnic_fw_cmd_destroy_tx_ctx(adapter);
+ int ring;
+ if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
+ qlcnic_fw_cmd_del_rx_ctx(adapter);
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++)
+ qlcnic_fw_cmd_del_tx_ctx(adapter,
+ &adapter->tx_ring[ring]);
+
+ if (qlcnic_82xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ qlcnic_82xx_config_intrpt(adapter, 0);
+
+ if (qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ qlcnic_83xx_config_intrpt(adapter, 0);
+ }
/* Allow dma queues to drain after context reset */
mdelay(20);
}
@@ -629,20 +721,23 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
recv_ctx = adapter->recv_ctx;
- tx_ring = adapter->tx_ring;
- if (tx_ring->hw_consumer != NULL) {
- dma_free_coherent(&adapter->pdev->dev,
- sizeof(u32),
- tx_ring->hw_consumer,
- tx_ring->hw_cons_phys_addr);
- tx_ring->hw_consumer = NULL;
- }
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ if (tx_ring->hw_consumer != NULL) {
+ dma_free_coherent(&adapter->pdev->dev, sizeof(u32),
+ tx_ring->hw_consumer,
+ tx_ring->hw_cons_phys_addr);
+
+ tx_ring->hw_consumer = NULL;
+ }
- if (tx_ring->desc_head != NULL) {
- dma_free_coherent(&adapter->pdev->dev,
- TX_DESC_RINGSIZE(tx_ring),
- tx_ring->desc_head, tx_ring->phys_addr);
- tx_ring->desc_head = NULL;
+ if (tx_ring->desc_head != NULL) {
+ dma_free_coherent(&adapter->pdev->dev,
+ TX_DESC_RINGSIZE(tx_ring),
+ tx_ring->desc_head,
+ tx_ring->phys_addr);
+ tx_ring->desc_head = NULL;
+ }
}
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
@@ -657,7 +752,7 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
}
}
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (sds_ring->desc_head != NULL) {
@@ -670,90 +765,137 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
}
}
+int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *adapter, u8 op_type)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_cmd_args cmd;
+ u32 type, val;
+ int i, err = 0;
+
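+	/* Add or delete one MQ Tx interrupt per MSI-X vector; on add, the
+	 * response carries the firmware assigned interrupt id and source.
+	 */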
+ for (i = 0; i < ahw->num_msix; i++) {
+ qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_MQ_TX_CONFIG_INTR);
+ type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL;
+ val = type | (ahw->intr_tbl[i].type << 4);
+ if (ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX)
+ val |= (ahw->intr_tbl[i].id << 16);
+ cmd.req.arg[1] = val;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ netdev_err(netdev, "Failed to %s interrupts %d\n",
+ op_type == QLCNIC_INTRPT_ADD ? "Add" :
+ "Delete", err);
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+ }
+ val = cmd.rsp.arg[1];
+ if (LSB(val)) {
+ netdev_info(netdev,
+ "failed to configure interrupt for %d\n",
+ ahw->intr_tbl[i].id);
+ continue;
+ }
+ if (op_type) {
+ ahw->intr_tbl[i].id = MSW(val);
+ ahw->intr_tbl[i].enabled = 1;
+ ahw->intr_tbl[i].src = cmd.rsp.arg[2];
+ } else {
+ ahw->intr_tbl[i].id = i;
+ ahw->intr_tbl[i].enabled = 0;
+ ahw->intr_tbl[i].src = 0;
+ }
+ qlcnic_free_mbx_args(&cmd);
+ }
+
+ return err;
+}
-/* Get MAC address of a NIC partition */
-int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
+int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
+ u8 function)
{
- int err;
+ int err, i;
struct qlcnic_cmd_args cmd;
+ u32 mac_low, mac_high;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
+ if (err)
+ return err;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.arg1 = adapter->ahw->pci_func | BIT_8;
- cmd.req.cmd = QLCNIC_CDRP_CMD_MAC_ADDRESS;
- cmd.rsp.arg1 = cmd.rsp.arg2 = 1;
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
+ cmd.req.arg[1] = function | BIT_8;
+ err = qlcnic_issue_cmd(adapter, &cmd);
- if (err == QLCNIC_RCODE_SUCCESS)
- qlcnic_fetch_mac(cmd.rsp.arg1, cmd.rsp.arg2, 0, mac);
- else {
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ mac_low = cmd.rsp.arg[1];
+ mac_high = cmd.rsp.arg[2];
+
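+	/* Bytes 0-1 of the MAC address come from the high response word,
+	 * bytes 2-5 from the low word.
+	 */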
+ for (i = 0; i < 2; i++)
+ mac[i] = (u8) (mac_high >> ((1 - i) * 8));
+ for (i = 2; i < 6; i++)
+ mac[i] = (u8) (mac_low >> ((5 - i) * 8));
+ } else {
dev_err(&adapter->pdev->dev,
"Failed to get mac address%d\n", err);
err = -EIO;
}
-
+ qlcnic_free_mbx_args(&cmd);
return err;
}
/* Get info of a NIC partition */
-int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
- struct qlcnic_info *npar_info, u8 func_id)
+int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info, u8 func_id)
{
int err;
dma_addr_t nic_dma_t;
- struct qlcnic_info_le *nic_info;
+ const struct qlcnic_info_le *nic_info;
void *nic_info_addr;
struct qlcnic_cmd_args cmd;
- size_t nic_size = sizeof(struct qlcnic_info_le);
+ size_t nic_size = sizeof(struct qlcnic_info_le);
- nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
- &nic_dma_t, GFP_KERNEL);
+ nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
+ &nic_dma_t, GFP_KERNEL);
if (!nic_info_addr)
return -ENOMEM;
- memset(nic_info_addr, 0, nic_size);
nic_info = nic_info_addr;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_GET_NIC_INFO;
- cmd.req.arg1 = MSD(nic_dma_t);
- cmd.req.arg2 = LSD(nic_dma_t);
- cmd.req.arg3 = (func_id << 16 | nic_size);
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
- if (err == QLCNIC_RCODE_SUCCESS) {
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
+ if (err)
+ goto out_free_dma;
+
+ cmd.req.arg[1] = MSD(nic_dma_t);
+ cmd.req.arg[2] = LSD(nic_dma_t);
+ cmd.req.arg[3] = (func_id << 16 | nic_size);
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get nic info%d\n", err);
+ err = -EIO;
+ } else {
npar_info->pci_func = le16_to_cpu(nic_info->pci_func);
npar_info->op_mode = le16_to_cpu(nic_info->op_mode);
+ npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
+ npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
npar_info->phys_port = le16_to_cpu(nic_info->phys_port);
npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode);
npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
- npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
- npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
-
- dev_info(&adapter->pdev->dev,
- "phy port: %d switch_mode: %d,\n"
- "\tmax_tx_q: %d max_rx_q: %d min_tx_bw: 0x%x,\n"
- "\tmax_tx_bw: 0x%x max_mtu:0x%x, capabilities: 0x%x\n",
- npar_info->phys_port, npar_info->switch_mode,
- npar_info->max_tx_ques, npar_info->max_rx_ques,
- npar_info->min_tx_bw, npar_info->max_tx_bw,
- npar_info->max_mtu, npar_info->capabilities);
- } else {
- dev_err(&adapter->pdev->dev,
- "Failed to get nic info%d\n", err);
- err = -EIO;
}
+ qlcnic_free_mbx_args(&cmd);
+out_free_dma:
dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
- nic_dma_t);
+ nic_dma_t);
+
return err;
}
/* Configure a NIC partition */
-int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
+int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *nic)
{
int err = -EIO;
dma_addr_t nic_dma_t;
@@ -765,12 +907,11 @@ int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
return err;
- nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
- &nic_dma_t, GFP_KERNEL);
+ nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
+ &nic_dma_t, GFP_KERNEL);
if (!nic_info_addr)
return -ENOMEM;
- memset(nic_info_addr, 0, nic_size);
nic_info = nic_info_addr;
nic_info->pci_func = cpu_to_le16(nic->pci_func);
@@ -784,13 +925,14 @@ int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_SET_NIC_INFO;
- cmd.req.arg1 = MSD(nic_dma_t);
- cmd.req.arg2 = LSD(nic_dma_t);
- cmd.req.arg3 = ((nic->pci_func << 16) | nic_size);
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+ if (err)
+ goto out_free_dma;
+
+ cmd.req.arg[1] = MSD(nic_dma_t);
+ cmd.req.arg[2] = LSD(nic_dma_t);
+ cmd.req.arg[3] = ((nic->pci_func << 16) | nic_size);
+ err = qlcnic_issue_cmd(adapter, &cmd);
if (err != QLCNIC_RCODE_SUCCESS) {
dev_err(&adapter->pdev->dev,
@@ -798,46 +940,53 @@ int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
err = -EIO;
}
+ qlcnic_free_mbx_args(&cmd);
+out_free_dma:
dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
- nic_dma_t);
+ nic_dma_t);
+
return err;
}
/* Get PCI Info of a partition */
-int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
- struct qlcnic_pci_info *pci_info)
+int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_pci_info *pci_info)
{
- int err = 0, i;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ size_t npar_size = sizeof(struct qlcnic_pci_info_le);
+ size_t pci_size = npar_size * ahw->max_vnic_func;
+ u16 nic = 0, fcoe = 0, iscsi = 0;
+ struct qlcnic_pci_info_le *npar;
struct qlcnic_cmd_args cmd;
dma_addr_t pci_info_dma_t;
- struct qlcnic_pci_info_le *npar;
void *pci_info_addr;
- size_t npar_size = sizeof(struct qlcnic_pci_info_le);
- size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
+ int err = 0, i;
- pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
- &pci_info_dma_t, GFP_KERNEL);
+ pci_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, pci_size,
+ &pci_info_dma_t, GFP_KERNEL);
if (!pci_info_addr)
return -ENOMEM;
- memset(pci_info_addr, 0, pci_size);
npar = pci_info_addr;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_GET_PCI_INFO;
- cmd.req.arg1 = MSD(pci_info_dma_t);
- cmd.req.arg2 = LSD(pci_info_dma_t);
- cmd.req.arg3 = pci_size;
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
-
- adapter->ahw->act_pci_func = 0;
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
+ if (err)
+ goto out_free_dma;
+
+ cmd.req.arg[1] = MSD(pci_info_dma_t);
+ cmd.req.arg[2] = LSD(pci_info_dma_t);
+ cmd.req.arg[3] = pci_size;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ ahw->total_nic_func = 0;
if (err == QLCNIC_RCODE_SUCCESS) {
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) {
+ for (i = 0; i < ahw->max_vnic_func; i++, npar++, pci_info++) {
pci_info->id = le16_to_cpu(npar->id);
pci_info->active = le16_to_cpu(npar->active);
+ if (!pci_info->active)
+ continue;
pci_info->type = le16_to_cpu(npar->type);
- if (pci_info->type == QLCNIC_TYPE_NIC)
- adapter->ahw->act_pci_func++;
+ err = qlcnic_get_pci_func_type(adapter, pci_info->type,
+ &nic, &fcoe, &iscsi);
pci_info->default_port =
le16_to_cpu(npar->default_port);
pci_info->tx_min_bw =
@@ -852,41 +1001,56 @@ int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
err = -EIO;
}
+ ahw->total_nic_func = nic;
+ ahw->total_pci_func = nic + fcoe + iscsi;
+ if (ahw->total_nic_func == 0 || ahw->total_pci_func == 0) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Invalid function count: total nic func[%x], total pci func[%x]\n",
+ __func__, ahw->total_nic_func, ahw->total_pci_func);
+ err = -EIO;
+ }
+ qlcnic_free_mbx_args(&cmd);
+out_free_dma:
dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
pci_info_dma_t);
+
return err;
}
/* Configure eSwitch for port mirroring */
int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
- u8 enable_mirroring, u8 pci_func)
+ u8 enable_mirroring, u8 pci_func)
{
+ struct device *dev = &adapter->pdev->dev;
+ struct qlcnic_cmd_args cmd;
int err = -EIO;
u32 arg1;
- struct qlcnic_cmd_args cmd;
if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC ||
- !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
+ !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) {
+ dev_err(&adapter->pdev->dev, "%s: Not a management function\n",
+ __func__);
return err;
+ }
arg1 = id | (enable_mirroring ? BIT_4 : 0);
arg1 |= pci_func << 8;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_SET_PORTMIRRORING;
- cmd.req.arg1 = arg1;
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_SET_PORTMIRRORING);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = arg1;
+ err = qlcnic_issue_cmd(adapter, &cmd);
- if (err != QLCNIC_RCODE_SUCCESS) {
- dev_err(&adapter->pdev->dev,
- "Failed to configure port mirroring%d on eswitch:%d\n",
+ if (err != QLCNIC_RCODE_SUCCESS)
+ dev_err(dev, "Failed to configure port mirroring for vNIC function %d on eSwitch %d\n",
pci_func, id);
- } else {
- dev_info(&adapter->pdev->dev,
- "Configured eSwitch %d for port mirroring:%d\n",
- id, pci_func);
- }
+ else
+ dev_info(dev, "Configured port mirroring for vNIC function %d on eSwitch %d\n",
+ pci_func, id);
+ qlcnic_free_mbx_args(&cmd);
return err;
}
@@ -912,24 +1076,23 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
return -EIO;
}
- stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
- &stats_dma_t, GFP_KERNEL);
- if (!stats_addr) {
- dev_err(&adapter->pdev->dev, "Unable to allocate memory\n");
+ stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
+ &stats_dma_t, GFP_KERNEL);
+ if (!stats_addr)
return -ENOMEM;
- }
- memset(stats_addr, 0, stats_size);
arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
arg1 |= rx_tx << 15 | stats_size << 16;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_STATS;
- cmd.req.arg1 = arg1;
- cmd.req.arg2 = MSD(stats_dma_t);
- cmd.req.arg3 = LSD(stats_dma_t);
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_GET_ESWITCH_STATS);
+ if (err)
+ goto out_free_dma;
+
+ cmd.req.arg[1] = arg1;
+ cmd.req.arg[2] = MSD(stats_dma_t);
+ cmd.req.arg[3] = LSD(stats_dma_t);
+ err = qlcnic_issue_cmd(adapter, &cmd);
if (!err) {
stats = stats_addr;
@@ -947,8 +1110,11 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
esw_stats->numbytes = le64_to_cpu(stats->numbytes);
}
+ qlcnic_free_mbx_args(&cmd);
+out_free_dma:
dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
- stats_dma_t);
+ stats_dma_t);
+
return err;
}
@@ -963,23 +1129,22 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
void *stats_addr;
int err;
- stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
- &stats_dma_t, GFP_KERNEL);
- if (!stats_addr) {
- dev_err(&adapter->pdev->dev,
- "%s: Unable to allocate memory.\n", __func__);
+ if (mac_stats == NULL)
return -ENOMEM;
- }
- memset(stats_addr, 0, stats_size);
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_GET_MAC_STATS;
- cmd.req.arg1 = stats_size << 16;
- cmd.req.arg2 = MSD(stats_dma_t);
- cmd.req.arg3 = LSD(stats_dma_t);
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
+ stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
+ &stats_dma_t, GFP_KERNEL);
+ if (!stats_addr)
+ return -ENOMEM;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS);
+ if (err)
+ goto out_free_dma;
+ cmd.req.arg[1] = stats_size << 16;
+ cmd.req.arg[2] = MSD(stats_dma_t);
+ cmd.req.arg[3] = LSD(stats_dma_t);
+ err = qlcnic_issue_cmd(adapter, &cmd);
if (!err) {
stats = stats_addr;
mac_stats->mac_tx_frames = le64_to_cpu(stats->mac_tx_frames);
@@ -1001,10 +1166,17 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber);
mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped);
mac_stats->mac_rx_crc_error = le64_to_cpu(stats->mac_rx_crc_error);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "%s: Get mac stats failed, err=%d.\n", __func__, err);
}
+ qlcnic_free_mbx_args(&cmd);
+
+out_free_dma:
dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
- stats_dma_t);
+ stats_dma_t);
+
return err;
}
@@ -1032,7 +1204,7 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL;
esw_stats->context_id = eswitch;
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
if (adapter->npars[i].phy_port != eswitch)
continue;
@@ -1065,15 +1237,16 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
const u8 port, const u8 rx_tx)
{
-
- u32 arg1;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_cmd_args cmd;
+ int err;
+ u32 arg1;
- if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+ if (ahw->op_mode != QLCNIC_MGMT_FUNC)
return -EIO;
if (func_esw == QLCNIC_STATS_PORT) {
- if (port >= QLCNIC_MAX_PCI_FUNC)
+ if (port >= ahw->max_vnic_func)
goto err_ret;
} else if (func_esw == QLCNIC_STATS_ESWITCH) {
if (port >= QLCNIC_NIU_MAX_XG_PORTS)
@@ -1088,43 +1261,48 @@ int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12;
arg1 |= BIT_14 | rx_tx << 15;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_STATS;
- cmd.req.arg1 = arg1;
- qlcnic_issue_cmd(adapter, &cmd);
- return cmd.rsp.cmd;
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_GET_ESWITCH_STATS);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = arg1;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ qlcnic_free_mbx_args(&cmd);
+ return err;
err_ret:
- dev_err(&adapter->pdev->dev, "Invalid argument func_esw=%d port=%d"
- "rx_ctx=%d\n", func_esw, port, rx_tx);
+ dev_err(&adapter->pdev->dev,
+ "Invalid args func_esw %d port %d rx_ctx %d\n",
+ func_esw, port, rx_tx);
return -EIO;
}
-static int
-__qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
- u32 *arg1, u32 *arg2)
+static int __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
+ u32 *arg1, u32 *arg2)
{
- int err = -EIO;
+ struct device *dev = &adapter->pdev->dev;
struct qlcnic_cmd_args cmd;
- u8 pci_func;
- pci_func = (*arg1 >> 8);
+ u8 pci_func = *arg1 >> 8;
+ int err;
- cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG;
- cmd.req.arg1 = *arg1;
- cmd.rsp.arg1 = cmd.rsp.arg2 = 1;
- qlcnic_issue_cmd(adapter, &cmd);
- *arg1 = cmd.rsp.arg1;
- *arg2 = cmd.rsp.arg2;
- err = cmd.rsp.cmd;
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG);
+ if (err)
+ return err;
- if (err == QLCNIC_RCODE_SUCCESS) {
- dev_info(&adapter->pdev->dev,
- "eSwitch port config for pci func %d\n", pci_func);
- } else {
- dev_err(&adapter->pdev->dev,
- "Failed to get eswitch port config for pci func %d\n",
- pci_func);
- }
+ cmd.req.arg[1] = *arg1;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ *arg1 = cmd.rsp.arg[1];
+ *arg2 = cmd.rsp.arg[2];
+ qlcnic_free_mbx_args(&cmd);
+
+ if (err == QLCNIC_RCODE_SUCCESS)
+ dev_info(dev, "Get eSwitch port config for vNIC function %d\n",
+ pci_func);
+ else
+ dev_err(dev, "Failed to get eswitch port config for vNIC function %d\n",
+ pci_func);
return err;
}
/* Configure eSwitch port
@@ -1137,13 +1315,18 @@ op_type = 1 for port vlan_id
int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
struct qlcnic_esw_func_cfg *esw_cfg)
{
+ struct device *dev = &adapter->pdev->dev;
+ struct qlcnic_cmd_args cmd;
int err = -EIO, index;
u32 arg1, arg2 = 0;
- struct qlcnic_cmd_args cmd;
u8 pci_func;
- if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
+ dev_err(&adapter->pdev->dev, "%s: Not a management function\n",
+ __func__);
return err;
+ }
+
pci_func = esw_cfg->pci_func;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
if (index < 0)
@@ -1178,6 +1361,7 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
arg2 &= ~BIT_3;
break;
case QLCNIC_ADD_VLAN:
+ arg1 &= ~(0x0ffff << 16);
arg1 |= (BIT_2 | BIT_5);
arg1 |= (esw_cfg->vlan_id << 16);
break;
@@ -1186,23 +1370,27 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
arg1 &= ~(0x0ffff << 16);
break;
default:
+ dev_err(&adapter->pdev->dev, "%s: Invalid opmode 0x%x\n",
+ __func__, esw_cfg->op_mode);
return err;
}
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH;
- cmd.req.arg1 = arg1;
- cmd.req.arg2 = arg2;
- qlcnic_issue_cmd(adapter, &cmd);
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_CONFIGURE_ESWITCH);
+ if (err)
+ return err;
- err = cmd.rsp.cmd;
- if (err != QLCNIC_RCODE_SUCCESS) {
- dev_err(&adapter->pdev->dev,
- "Failed to configure eswitch pci func %d\n", pci_func);
- } else {
- dev_info(&adapter->pdev->dev,
- "Configured eSwitch for pci func %d\n", pci_func);
- }
+ cmd.req.arg[1] = arg1;
+ cmd.req.arg[2] = arg2;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ qlcnic_free_mbx_args(&cmd);
+
+ if (err != QLCNIC_RCODE_SUCCESS)
+ dev_err(dev, "Failed to configure eswitch for vNIC function %d\n",
+ pci_func);
+ else
+ dev_info(dev, "Configured eSwitch for vNIC function %d\n",
+ pci_func);
return err;
}
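
Editor's note: the hunks above convert the eSwitch and statistics helpers from filling cmd.req.cmd/arg1..arg3 by hand to the qlcnic_alloc_mbx_args() / qlcnic_issue_cmd() / qlcnic_free_mbx_args() lifecycle, with DMA buffers released through a shared out_free_dma label. The fragment below is a stand-alone sketch of that allocate-fill-issue-free flow; struct mbx_args, alloc_mbx_args(), issue_cmd(), free_mbx_args() and the 0x21 opcode are placeholders for illustration, not the real driver API.

#include <stdio.h>
#include <string.h>

/* Mock of the mailbox argument block; the real driver sizes the
 * req/rsp arrays per command from a lookup table. */
struct mbx_args {
	unsigned int req[4];
	unsigned int rsp[4];
};

static int alloc_mbx_args(struct mbx_args *cmd, unsigned int opcode)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->req[0] = opcode;		/* arg[0] carries the command code */
	return 0;
}

static int issue_cmd(struct mbx_args *cmd)
{
	cmd->rsp[0] = 0;		/* pretend the firmware succeeded */
	return 0;
}

static void free_mbx_args(struct mbx_args *cmd)
{
	memset(cmd, 0, sizeof(*cmd));	/* real code frees the arg arrays */
}

static int config_port_mirroring(unsigned int eswitch_id, int enable,
				 unsigned int pci_func)
{
	struct mbx_args cmd;
	unsigned int arg1;
	int err;

	arg1 = eswitch_id | (enable ? 1u << 4 : 0) | (pci_func << 8);

	err = alloc_mbx_args(&cmd, 0x21 /* placeholder opcode */);
	if (err)
		return err;

	cmd.req[1] = arg1;		/* fill the args, then issue */
	err = issue_cmd(&cmd);
	if (err)
		fprintf(stderr, "port mirroring config failed: %d\n", err);

	free_mbx_args(&cmd);		/* always release the args */
	return err;
}

int main(void)
{
	return config_port_mirroring(0, 1, 2);
}

In the driver itself the same shape repeats for the stats helpers, with the extra wrinkle that DMA buffers allocated before qlcnic_alloc_mbx_args() are released through the out_free_dma label even when argument allocation fails.
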
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
new file mode 100644
index 00000000000..561cb11ca58
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -0,0 +1,1147 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include <linux/types.h>
+#include "qlcnic.h"
+
+#define QLC_DCB_NUM_PARAM 3
+#define QLC_DCB_LOCAL_IDX 0
+#define QLC_DCB_OPER_IDX 1
+#define QLC_DCB_PEER_IDX 2
+
+#define QLC_DCB_GET_MAP(V) (1 << V)
+
+#define QLC_DCB_FW_VER 0x2
+#define QLC_DCB_MAX_TC 0x8
+#define QLC_DCB_MAX_APP 0x8
+#define QLC_DCB_MAX_PRIO QLC_DCB_MAX_TC
+#define QLC_DCB_MAX_PG QLC_DCB_MAX_TC
+
+#define QLC_DCB_TSA_SUPPORT(V) (V & 0x1)
+#define QLC_DCB_ETS_SUPPORT(V) ((V >> 1) & 0x1)
+#define QLC_DCB_VERSION_SUPPORT(V) ((V >> 2) & 0xf)
+#define QLC_DCB_MAX_NUM_TC(V) ((V >> 20) & 0xf)
+#define QLC_DCB_MAX_NUM_ETS_TC(V) ((V >> 24) & 0xf)
+#define QLC_DCB_MAX_NUM_PFC_TC(V) ((V >> 28) & 0xf)
+#define QLC_DCB_GET_TC_PRIO(X, P) ((X >> (P * 3)) & 0x7)
+#define QLC_DCB_GET_PGID_PRIO(X, P) ((X >> (P * 8)) & 0xff)
+#define QLC_DCB_GET_BWPER_PG(X, P) ((X >> (P * 8)) & 0xff)
+#define QLC_DCB_GET_TSA_PG(X, P) ((X >> (P * 8)) & 0xff)
+#define QLC_DCB_GET_PFC_PRIO(X, P) (((X >> 24) >> P) & 0x1)
+#define QLC_DCB_GET_PROTO_ID_APP(X) ((X >> 8) & 0xffff)
+#define QLC_DCB_GET_SELECTOR_APP(X) (X & 0xff)
+
+#define QLC_DCB_LOCAL_PARAM_FWID 0x3
+#define QLC_DCB_OPER_PARAM_FWID 0x1
+#define QLC_DCB_PEER_PARAM_FWID 0x2
+
+#define QLC_83XX_DCB_GET_NUMAPP(X) ((X >> 2) & 0xf)
+#define QLC_83XX_DCB_TSA_VALID(X) (X & 0x1)
+#define QLC_83XX_DCB_PFC_VALID(X) ((X >> 1) & 0x1)
+#define QLC_83XX_DCB_GET_PRIOMAP_APP(X) (X >> 24)
+
+#define QLC_82XX_DCB_GET_NUMAPP(X) ((X >> 12) & 0xf)
+#define QLC_82XX_DCB_TSA_VALID(X) ((X >> 4) & 0x1)
+#define QLC_82XX_DCB_PFC_VALID(X) ((X >> 5) & 0x1)
+#define QLC_82XX_DCB_GET_PRIOVAL_APP(X) ((X >> 24) & 0x7)
+#define QLC_82XX_DCB_GET_PRIOMAP_APP(X) (1 << X)
+#define QLC_82XX_DCB_PRIO_TC_MAP (0x76543210)
+
+static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops;
+
+static void qlcnic_dcb_aen_work(struct work_struct *);
+static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *);
+
+static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_dcb *);
+static void __qlcnic_dcb_free(struct qlcnic_dcb *);
+static int __qlcnic_dcb_attach(struct qlcnic_dcb *);
+static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *, char *);
+static void __qlcnic_dcb_get_info(struct qlcnic_dcb *);
+
+static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *);
+static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8);
+static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *);
+static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
+
+static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *);
+static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8);
+static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *);
+static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
+
+struct qlcnic_dcb_capability {
+ bool tsa_capability;
+ bool ets_capability;
+ u8 max_num_tc;
+ u8 max_ets_tc;
+ u8 max_pfc_tc;
+ u8 dcb_capability;
+};
+
+struct qlcnic_dcb_param {
+ u32 hdr_prio_pfc_map[2];
+ u32 prio_pg_map[2];
+ u32 pg_bw_map[2];
+ u32 pg_tsa_map[2];
+ u32 app[QLC_DCB_MAX_APP];
+};
+
+struct qlcnic_dcb_mbx_params {
+	/* 1st local, 2nd operational, 3rd remote */
+ struct qlcnic_dcb_param type[3];
+ u32 prio_tc_map;
+};
+
+struct qlcnic_82xx_dcb_param_mbx_le {
+ __le32 hdr_prio_pfc_map[2];
+ __le32 prio_pg_map[2];
+ __le32 pg_bw_map[2];
+ __le32 pg_tsa_map[2];
+ __le32 app[QLC_DCB_MAX_APP];
+};
+
+enum qlcnic_dcb_selector {
+ QLC_SELECTOR_DEF = 0x0,
+ QLC_SELECTOR_ETHER,
+ QLC_SELECTOR_TCP,
+ QLC_SELECTOR_UDP,
+};
+
+enum qlcnic_dcb_prio_type {
+ QLC_PRIO_NONE = 0,
+ QLC_PRIO_GROUP,
+ QLC_PRIO_LINK,
+};
+
+enum qlcnic_dcb_pfc_type {
+ QLC_PFC_DISABLED = 0,
+ QLC_PFC_FULL,
+ QLC_PFC_TX,
+ QLC_PFC_RX
+};
+
+struct qlcnic_dcb_prio_cfg {
+ bool valid;
+ enum qlcnic_dcb_pfc_type pfc_type;
+};
+
+struct qlcnic_dcb_pg_cfg {
+ bool valid;
+ u8 total_bw_percent; /* of Link/ port BW */
+ u8 prio_count;
+ u8 tsa_type;
+};
+
+struct qlcnic_dcb_tc_cfg {
+ bool valid;
+ struct qlcnic_dcb_prio_cfg prio_cfg[QLC_DCB_MAX_PRIO];
+ enum qlcnic_dcb_prio_type prio_type; /* always prio_link */
+ u8 link_percent; /* % of link bandwidth */
+ u8 bwg_percent; /* % of BWG's bandwidth */
+ u8 up_tc_map;
+ u8 pgid;
+};
+
+struct qlcnic_dcb_app {
+ bool valid;
+ enum qlcnic_dcb_selector selector;
+ u16 protocol;
+ u8 priority;
+};
+
+struct qlcnic_dcb_cee {
+ struct qlcnic_dcb_tc_cfg tc_cfg[QLC_DCB_MAX_TC];
+ struct qlcnic_dcb_pg_cfg pg_cfg[QLC_DCB_MAX_PG];
+ struct qlcnic_dcb_app app[QLC_DCB_MAX_APP];
+ bool tc_param_valid;
+ bool pfc_mode_enable;
+};
+
+struct qlcnic_dcb_cfg {
+ /* 0 - local, 1 - operational, 2 - remote */
+ struct qlcnic_dcb_cee type[QLC_DCB_NUM_PARAM];
+ struct qlcnic_dcb_capability capability;
+ u32 version;
+};
+
+static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = {
+ .init_dcbnl_ops = __qlcnic_init_dcbnl_ops,
+ .free = __qlcnic_dcb_free,
+ .attach = __qlcnic_dcb_attach,
+ .query_hw_capability = __qlcnic_dcb_query_hw_capability,
+ .get_info = __qlcnic_dcb_get_info,
+
+ .get_hw_capability = qlcnic_83xx_dcb_get_hw_capability,
+ .query_cee_param = qlcnic_83xx_dcb_query_cee_param,
+ .get_cee_cfg = qlcnic_83xx_dcb_get_cee_cfg,
+ .aen_handler = qlcnic_83xx_dcb_aen_handler,
+};
+
+static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = {
+ .init_dcbnl_ops = __qlcnic_init_dcbnl_ops,
+ .free = __qlcnic_dcb_free,
+ .attach = __qlcnic_dcb_attach,
+ .query_hw_capability = __qlcnic_dcb_query_hw_capability,
+ .get_info = __qlcnic_dcb_get_info,
+
+ .get_hw_capability = qlcnic_82xx_dcb_get_hw_capability,
+ .query_cee_param = qlcnic_82xx_dcb_query_cee_param,
+ .get_cee_cfg = qlcnic_82xx_dcb_get_cee_cfg,
+ .aen_handler = qlcnic_82xx_dcb_aen_handler,
+};
+
+static u8 qlcnic_dcb_get_num_app(struct qlcnic_adapter *adapter, u32 val)
+{
+ if (qlcnic_82xx_check(adapter))
+ return QLC_82XX_DCB_GET_NUMAPP(val);
+ else
+ return QLC_83XX_DCB_GET_NUMAPP(val);
+}
+
+static inline u8 qlcnic_dcb_pfc_hdr_valid(struct qlcnic_adapter *adapter,
+ u32 val)
+{
+ if (qlcnic_82xx_check(adapter))
+ return QLC_82XX_DCB_PFC_VALID(val);
+ else
+ return QLC_83XX_DCB_PFC_VALID(val);
+}
+
+static inline u8 qlcnic_dcb_tsa_hdr_valid(struct qlcnic_adapter *adapter,
+ u32 val)
+{
+ if (qlcnic_82xx_check(adapter))
+ return QLC_82XX_DCB_TSA_VALID(val);
+ else
+ return QLC_83XX_DCB_TSA_VALID(val);
+}
+
+static inline u8 qlcnic_dcb_get_prio_map_app(struct qlcnic_adapter *adapter,
+ u32 val)
+{
+ if (qlcnic_82xx_check(adapter))
+ return QLC_82XX_DCB_GET_PRIOMAP_APP(val);
+ else
+ return QLC_83XX_DCB_GET_PRIOMAP_APP(val);
+}
+
+static int qlcnic_dcb_prio_count(u8 up_tc_map)
+{
+ int j;
+
+ for (j = 0; j < QLC_DCB_MAX_TC; j++)
+ if (up_tc_map & QLC_DCB_GET_MAP(j))
+ break;
+
+ return j;
+}
+
+static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_dcb *dcb)
+{
+ if (test_bit(QLCNIC_DCB_STATE, &dcb->state))
+ dcb->adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops;
+}
+
+static void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_82xx_check(adapter))
+ adapter->dcb->ops = &qlcnic_82xx_dcb_ops;
+ else if (qlcnic_83xx_check(adapter))
+ adapter->dcb->ops = &qlcnic_83xx_dcb_ops;
+}
+
+int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ return 0;
+
+ dcb = kzalloc(sizeof(struct qlcnic_dcb), GFP_ATOMIC);
+ if (!dcb)
+ return -ENOMEM;
+
+ adapter->dcb = dcb;
+ dcb->adapter = adapter;
+ qlcnic_set_dcb_ops(adapter);
+ dcb->state = 0;
+
+ return 0;
+}
+
+static void __qlcnic_dcb_free(struct qlcnic_dcb *dcb)
+{
+ struct qlcnic_adapter *adapter;
+
+ if (!dcb)
+ return;
+
+ adapter = dcb->adapter;
+
+ while (test_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
+ usleep_range(10000, 11000);
+
+ cancel_delayed_work_sync(&dcb->aen_work);
+
+ if (dcb->wq) {
+ destroy_workqueue(dcb->wq);
+ dcb->wq = NULL;
+ }
+
+ kfree(dcb->cfg);
+ dcb->cfg = NULL;
+ kfree(dcb->param);
+ dcb->param = NULL;
+ kfree(dcb);
+ adapter->dcb = NULL;
+}
+
+static void __qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
+{
+ qlcnic_dcb_get_hw_capability(dcb);
+ qlcnic_dcb_get_cee_cfg(dcb);
+}
+
+static int __qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
+{
+ int err = 0;
+
+ INIT_DELAYED_WORK(&dcb->aen_work, qlcnic_dcb_aen_work);
+
+ dcb->wq = create_singlethread_workqueue("qlcnic-dcb");
+ if (!dcb->wq) {
+ dev_err(&dcb->adapter->pdev->dev,
+ "DCB workqueue allocation failed. DCB will be disabled\n");
+ return -1;
+ }
+
+ dcb->cfg = kzalloc(sizeof(struct qlcnic_dcb_cfg), GFP_ATOMIC);
+ if (!dcb->cfg) {
+ err = -ENOMEM;
+ goto out_free_wq;
+ }
+
+ dcb->param = kzalloc(sizeof(struct qlcnic_dcb_mbx_params), GFP_ATOMIC);
+ if (!dcb->param) {
+ err = -ENOMEM;
+ goto out_free_cfg;
+ }
+
+ return 0;
+out_free_cfg:
+ kfree(dcb->cfg);
+ dcb->cfg = NULL;
+
+out_free_wq:
+ destroy_workqueue(dcb->wq);
+ dcb->wq = NULL;
+
+ return err;
+}
+
+static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf)
+{
+ struct qlcnic_adapter *adapter = dcb->adapter;
+ struct qlcnic_cmd_args cmd;
+ u32 mbx_out;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_CAP);
+ if (err)
+ return err;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to query DCBX capability, err %d\n", err);
+ } else {
+ mbx_out = cmd.rsp.arg[1];
+ if (buf)
+ memcpy(buf, &mbx_out, sizeof(u32));
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+static int __qlcnic_dcb_get_capability(struct qlcnic_dcb *dcb, u32 *val)
+{
+ struct qlcnic_dcb_capability *cap = &dcb->cfg->capability;
+ u32 mbx_out;
+ int err;
+
+ memset(cap, 0, sizeof(struct qlcnic_dcb_capability));
+
+ err = qlcnic_dcb_query_hw_capability(dcb, (char *)val);
+ if (err)
+ return err;
+
+ mbx_out = *val;
+ if (QLC_DCB_TSA_SUPPORT(mbx_out))
+ cap->tsa_capability = true;
+
+ if (QLC_DCB_ETS_SUPPORT(mbx_out))
+ cap->ets_capability = true;
+
+ cap->max_num_tc = QLC_DCB_MAX_NUM_TC(mbx_out);
+ cap->max_ets_tc = QLC_DCB_MAX_NUM_ETS_TC(mbx_out);
+ cap->max_pfc_tc = QLC_DCB_MAX_NUM_PFC_TC(mbx_out);
+
+ if (cap->max_num_tc > QLC_DCB_MAX_TC ||
+ cap->max_ets_tc > cap->max_num_tc ||
+ cap->max_pfc_tc > cap->max_num_tc) {
+ dev_err(&dcb->adapter->pdev->dev, "Invalid DCB configuration\n");
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
+{
+ struct qlcnic_dcb_cfg *cfg = dcb->cfg;
+ struct qlcnic_dcb_capability *cap;
+ u32 mbx_out;
+ int err;
+
+ err = __qlcnic_dcb_get_capability(dcb, &mbx_out);
+ if (err)
+ return err;
+
+ cap = &cfg->capability;
+ cap->dcb_capability = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_LLD_MANAGED;
+
+ if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
+ set_bit(QLCNIC_DCB_STATE, &dcb->state);
+
+ return err;
+}
+
+static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *dcb,
+ char *buf, u8 type)
+{
+ u16 size = sizeof(struct qlcnic_82xx_dcb_param_mbx_le);
+ struct qlcnic_adapter *adapter = dcb->adapter;
+ struct qlcnic_82xx_dcb_param_mbx_le *prsp_le;
+ struct device *dev = &adapter->pdev->dev;
+ dma_addr_t cardrsp_phys_addr;
+ struct qlcnic_dcb_param rsp;
+ struct qlcnic_cmd_args cmd;
+ u64 phys_addr;
+ void *addr;
+ int err, i;
+
+ switch (type) {
+ case QLC_DCB_LOCAL_PARAM_FWID:
+ case QLC_DCB_OPER_PARAM_FWID:
+ case QLC_DCB_PEER_PARAM_FWID:
+ break;
+ default:
+ dev_err(dev, "Invalid parameter type %d\n", type);
+ return -EINVAL;
+ }
+
+ addr = dma_alloc_coherent(dev, size, &cardrsp_phys_addr, GFP_KERNEL);
+ if (addr == NULL)
+ return -ENOMEM;
+
+ prsp_le = addr;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_PARAM);
+ if (err)
+ goto out_free_rsp;
+
+ phys_addr = cardrsp_phys_addr;
+ cmd.req.arg[1] = size | (type << 16);
+ cmd.req.arg[2] = MSD(phys_addr);
+ cmd.req.arg[3] = LSD(phys_addr);
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(dev, "Failed to query DCBX parameter, err %d\n", err);
+ goto out;
+ }
+
+ memset(&rsp, 0, sizeof(struct qlcnic_dcb_param));
+ rsp.hdr_prio_pfc_map[0] = le32_to_cpu(prsp_le->hdr_prio_pfc_map[0]);
+ rsp.hdr_prio_pfc_map[1] = le32_to_cpu(prsp_le->hdr_prio_pfc_map[1]);
+ rsp.prio_pg_map[0] = le32_to_cpu(prsp_le->prio_pg_map[0]);
+ rsp.prio_pg_map[1] = le32_to_cpu(prsp_le->prio_pg_map[1]);
+ rsp.pg_bw_map[0] = le32_to_cpu(prsp_le->pg_bw_map[0]);
+ rsp.pg_bw_map[1] = le32_to_cpu(prsp_le->pg_bw_map[1]);
+ rsp.pg_tsa_map[0] = le32_to_cpu(prsp_le->pg_tsa_map[0]);
+ rsp.pg_tsa_map[1] = le32_to_cpu(prsp_le->pg_tsa_map[1]);
+
+ for (i = 0; i < QLC_DCB_MAX_APP; i++)
+ rsp.app[i] = le32_to_cpu(prsp_le->app[i]);
+
+ if (buf)
+ memcpy(buf, &rsp, size);
+out:
+ qlcnic_free_mbx_args(&cmd);
+
+out_free_rsp:
+ dma_free_coherent(dev, size, addr, cardrsp_phys_addr);
+
+ return err;
+}
+
+static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
+{
+ struct qlcnic_dcb_mbx_params *mbx;
+ int err;
+
+ mbx = dcb->param;
+ if (!mbx)
+ return 0;
+
+ err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[0],
+ QLC_DCB_LOCAL_PARAM_FWID);
+ if (err)
+ return err;
+
+ err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[1],
+ QLC_DCB_OPER_PARAM_FWID);
+ if (err)
+ return err;
+
+ err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[2],
+ QLC_DCB_PEER_PARAM_FWID);
+ if (err)
+ return err;
+
+ mbx->prio_tc_map = QLC_82XX_DCB_PRIO_TC_MAP;
+
+ qlcnic_dcb_data_cee_param_map(dcb->adapter);
+
+ return err;
+}
+
+static void qlcnic_dcb_aen_work(struct work_struct *work)
+{
+ struct qlcnic_dcb *dcb;
+
+ dcb = container_of(work, struct qlcnic_dcb, aen_work.work);
+
+ qlcnic_dcb_get_cee_cfg(dcb);
+ clear_bit(QLCNIC_DCB_AEN_MODE, &dcb->state);
+}
+
+static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data)
+{
+ if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
+ return;
+
+ queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
+}
+
+static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
+{
+ struct qlcnic_dcb_capability *cap = &dcb->cfg->capability;
+ u32 mbx_out;
+ int err;
+
+ err = __qlcnic_dcb_get_capability(dcb, &mbx_out);
+ if (err)
+ return err;
+
+ if (mbx_out & BIT_2)
+ cap->dcb_capability = DCB_CAP_DCBX_VER_CEE;
+ if (mbx_out & BIT_3)
+ cap->dcb_capability |= DCB_CAP_DCBX_VER_IEEE;
+ if (cap->dcb_capability)
+ cap->dcb_capability |= DCB_CAP_DCBX_LLD_MANAGED;
+
+ if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
+ set_bit(QLCNIC_DCB_STATE, &dcb->state);
+
+ return err;
+}
+
+static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *dcb,
+ char *buf, u8 idx)
+{
+ struct qlcnic_adapter *adapter = dcb->adapter;
+ struct qlcnic_dcb_mbx_params mbx_out;
+ int err, i, j, k, max_app, size;
+ struct qlcnic_dcb_param *each;
+ struct qlcnic_cmd_args cmd;
+ u32 val;
+ char *p;
+
+ size = 0;
+ memset(&mbx_out, 0, sizeof(struct qlcnic_dcb_mbx_params));
+ memset(buf, 0, sizeof(struct qlcnic_dcb_mbx_params));
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_PARAM);
+ if (err)
+ return err;
+
+ cmd.req.arg[0] |= QLC_DCB_FW_VER << 29;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to query DCBX param, err %d\n", err);
+ goto out;
+ }
+
+ mbx_out.prio_tc_map = cmd.rsp.arg[1];
+ p = memcpy(buf, &mbx_out, sizeof(u32));
+ k = 2;
+ p += sizeof(u32);
+
+ for (j = 0; j < QLC_DCB_NUM_PARAM; j++) {
+ each = &mbx_out.type[j];
+
+ each->hdr_prio_pfc_map[0] = cmd.rsp.arg[k++];
+ each->hdr_prio_pfc_map[1] = cmd.rsp.arg[k++];
+ each->prio_pg_map[0] = cmd.rsp.arg[k++];
+ each->prio_pg_map[1] = cmd.rsp.arg[k++];
+ each->pg_bw_map[0] = cmd.rsp.arg[k++];
+ each->pg_bw_map[1] = cmd.rsp.arg[k++];
+ each->pg_tsa_map[0] = cmd.rsp.arg[k++];
+ each->pg_tsa_map[1] = cmd.rsp.arg[k++];
+ val = each->hdr_prio_pfc_map[0];
+
+ max_app = qlcnic_dcb_get_num_app(adapter, val);
+ for (i = 0; i < max_app; i++)
+ each->app[i] = cmd.rsp.arg[i + k];
+
+ size = 16 * sizeof(u32);
+ memcpy(p, &each->hdr_prio_pfc_map[0], size);
+ p += size;
+ if (j == 0)
+ k = 18;
+ else
+ k = 34;
+ }
+out:
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
+{
+ int err;
+
+ err = qlcnic_dcb_query_cee_param(dcb, (char *)dcb->param, 0);
+ if (err)
+ return err;
+
+ qlcnic_dcb_data_cee_param_map(dcb->adapter);
+
+ return err;
+}
+
+static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data)
+{
+ u32 *val = data;
+
+ if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
+ return;
+
+ if (*val & BIT_8)
+ set_bit(QLCNIC_DCB_STATE, &dcb->state);
+ else
+ clear_bit(QLCNIC_DCB_STATE, &dcb->state);
+
+ queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
+}
+
+static void qlcnic_dcb_fill_cee_tc_params(struct qlcnic_dcb_mbx_params *mbx,
+ struct qlcnic_dcb_param *each,
+ struct qlcnic_dcb_cee *type)
+{
+ struct qlcnic_dcb_tc_cfg *tc_cfg;
+ u8 i, tc, pgid;
+
+ for (i = 0; i < QLC_DCB_MAX_PRIO; i++) {
+ tc = QLC_DCB_GET_TC_PRIO(mbx->prio_tc_map, i);
+ tc_cfg = &type->tc_cfg[tc];
+ tc_cfg->valid = true;
+ tc_cfg->up_tc_map |= QLC_DCB_GET_MAP(i);
+
+ if (QLC_DCB_GET_PFC_PRIO(each->hdr_prio_pfc_map[1], i) &&
+ type->pfc_mode_enable) {
+ tc_cfg->prio_cfg[i].valid = true;
+ tc_cfg->prio_cfg[i].pfc_type = QLC_PFC_FULL;
+ }
+
+ if (i < 4)
+ pgid = QLC_DCB_GET_PGID_PRIO(each->prio_pg_map[0], i);
+ else
+ pgid = QLC_DCB_GET_PGID_PRIO(each->prio_pg_map[1], i);
+
+ tc_cfg->pgid = pgid;
+
+ tc_cfg->prio_type = QLC_PRIO_LINK;
+ type->pg_cfg[tc_cfg->pgid].prio_count++;
+ }
+}
+
+static void qlcnic_dcb_fill_cee_pg_params(struct qlcnic_dcb_param *each,
+ struct qlcnic_dcb_cee *type)
+{
+ struct qlcnic_dcb_pg_cfg *pg_cfg;
+ u8 i, tsa, bw_per;
+
+ for (i = 0; i < QLC_DCB_MAX_PG; i++) {
+ pg_cfg = &type->pg_cfg[i];
+ pg_cfg->valid = true;
+
+ if (i < 4) {
+ bw_per = QLC_DCB_GET_BWPER_PG(each->pg_bw_map[0], i);
+ tsa = QLC_DCB_GET_TSA_PG(each->pg_tsa_map[0], i);
+ } else {
+ bw_per = QLC_DCB_GET_BWPER_PG(each->pg_bw_map[1], i);
+ tsa = QLC_DCB_GET_TSA_PG(each->pg_tsa_map[1], i);
+ }
+
+ pg_cfg->total_bw_percent = bw_per;
+ pg_cfg->tsa_type = tsa;
+ }
+}
+
+static void
+qlcnic_dcb_fill_cee_app_params(struct qlcnic_adapter *adapter, u8 idx,
+ struct qlcnic_dcb_param *each,
+ struct qlcnic_dcb_cee *type)
+{
+ struct qlcnic_dcb_app *app;
+ u8 i, num_app, map, cnt;
+ struct dcb_app new_app;
+
+ num_app = qlcnic_dcb_get_num_app(adapter, each->hdr_prio_pfc_map[0]);
+ for (i = 0; i < num_app; i++) {
+ app = &type->app[i];
+ app->valid = true;
+
+ /* Only for CEE (-1) */
+ app->selector = QLC_DCB_GET_SELECTOR_APP(each->app[i]) - 1;
+ new_app.selector = app->selector;
+ app->protocol = QLC_DCB_GET_PROTO_ID_APP(each->app[i]);
+ new_app.protocol = app->protocol;
+ map = qlcnic_dcb_get_prio_map_app(adapter, each->app[i]);
+ cnt = qlcnic_dcb_prio_count(map);
+
+ if (cnt >= QLC_DCB_MAX_TC)
+ cnt = 0;
+
+ app->priority = cnt;
+ new_app.priority = cnt;
+
+ if (idx == QLC_DCB_OPER_IDX && adapter->netdev->dcbnl_ops)
+ dcb_setapp(adapter->netdev, &new_app);
+ }
+}
+
+static void qlcnic_dcb_map_cee_params(struct qlcnic_adapter *adapter, u8 idx)
+{
+ struct qlcnic_dcb_mbx_params *mbx = adapter->dcb->param;
+ struct qlcnic_dcb_param *each = &mbx->type[idx];
+ struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+ struct qlcnic_dcb_cee *type = &cfg->type[idx];
+
+ type->tc_param_valid = false;
+ type->pfc_mode_enable = false;
+ memset(type->tc_cfg, 0,
+ sizeof(struct qlcnic_dcb_tc_cfg) * QLC_DCB_MAX_TC);
+ memset(type->pg_cfg, 0,
+ sizeof(struct qlcnic_dcb_pg_cfg) * QLC_DCB_MAX_TC);
+
+ if (qlcnic_dcb_pfc_hdr_valid(adapter, each->hdr_prio_pfc_map[0]) &&
+ cfg->capability.max_pfc_tc)
+ type->pfc_mode_enable = true;
+
+ if (qlcnic_dcb_tsa_hdr_valid(adapter, each->hdr_prio_pfc_map[0]) &&
+ cfg->capability.max_ets_tc)
+ type->tc_param_valid = true;
+
+ qlcnic_dcb_fill_cee_tc_params(mbx, each, type);
+ qlcnic_dcb_fill_cee_pg_params(each, type);
+ qlcnic_dcb_fill_cee_app_params(adapter, idx, each, type);
+}
+
+static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < QLC_DCB_NUM_PARAM; i++)
+ qlcnic_dcb_map_cee_params(adapter, i);
+
+ dcbnl_cee_notify(adapter->netdev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
+}
+
+static u8 qlcnic_dcb_get_state(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ return test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state);
+}
+
+static void qlcnic_dcb_get_perm_hw_addr(struct net_device *netdev, u8 *addr)
+{
+ memcpy(addr, netdev->perm_addr, netdev->addr_len);
+}
+
+static void
+qlcnic_dcb_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio,
+ u8 *pgid, u8 *bw_per, u8 *up_tc_map)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_tc_cfg *tc_cfg, *temp;
+ struct qlcnic_dcb_cee *type;
+ u8 i, cnt, pg;
+
+ type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
+ *prio = *pgid = *bw_per = *up_tc_map = 0;
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
+ !type->tc_param_valid)
+ return;
+
+ if (tc < 0 || (tc >= QLC_DCB_MAX_TC))
+ return;
+
+ tc_cfg = &type->tc_cfg[tc];
+ if (!tc_cfg->valid)
+ return;
+
+ *pgid = tc_cfg->pgid;
+ *prio = tc_cfg->prio_type;
+ *up_tc_map = tc_cfg->up_tc_map;
+ pg = *pgid;
+
+ for (i = 0, cnt = 0; i < QLC_DCB_MAX_TC; i++) {
+ temp = &type->tc_cfg[i];
+ if (temp->valid && (pg == temp->pgid))
+ cnt++;
+ }
+
+ tc_cfg->bwg_percent = (100 / cnt);
+ *bw_per = tc_cfg->bwg_percent;
+}
+
+static void qlcnic_dcb_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid,
+ u8 *bw_pct)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_pg_cfg *pgcfg;
+ struct qlcnic_dcb_cee *type;
+
+ *bw_pct = 0;
+ type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
+ !type->tc_param_valid)
+ return;
+
+ if (pgid < 0 || pgid >= QLC_DCB_MAX_PG)
+ return;
+
+ pgcfg = &type->pg_cfg[pgid];
+ if (!pgcfg->valid)
+ return;
+
+ *bw_pct = pgcfg->total_bw_percent;
+}
+
+static void qlcnic_dcb_get_pfc_cfg(struct net_device *netdev, int prio,
+ u8 *setting)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_tc_cfg *tc_cfg;
+ u8 val = QLC_DCB_GET_MAP(prio);
+ struct qlcnic_dcb_cee *type;
+ u8 i;
+
+ *setting = 0;
+ type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
+ !type->pfc_mode_enable)
+ return;
+
+ for (i = 0; i < QLC_DCB_MAX_TC; i++) {
+ tc_cfg = &type->tc_cfg[i];
+ if (!tc_cfg->valid)
+ continue;
+
+ if ((val & tc_cfg->up_tc_map) && (tc_cfg->prio_cfg[prio].valid))
+ *setting = tc_cfg->prio_cfg[prio].pfc_type;
+ }
+}
+
+static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid,
+ u8 *cap)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+ return 0;
+
+ switch (capid) {
+ case DCB_CAP_ATTR_PG:
+ case DCB_CAP_ATTR_UP2TC:
+ case DCB_CAP_ATTR_PFC:
+ case DCB_CAP_ATTR_GSP:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PG_TCS:
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 0x80; /* 8 priorities for PGs */
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = adapter->dcb->cfg->capability.dcb_capability;
+ break;
+ default:
+ *cap = false;
+ }
+
+ return 0;
+}
+
+static int qlcnic_dcb_get_num_tcs(struct net_device *netdev, int attr, u8 *num)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+ return -EINVAL;
+
+ switch (attr) {
+ case DCB_NUMTCS_ATTR_PG:
+ *num = cfg->capability.max_ets_tc;
+ return 0;
+ case DCB_NUMTCS_ATTR_PFC:
+ *num = cfg->capability.max_pfc_tc;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static u8 qlcnic_dcb_get_app(struct net_device *netdev, u8 idtype, u16 id)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct dcb_app app = {
+ .selector = idtype,
+ .protocol = id,
+ };
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+ return 0;
+
+ return dcb_getapp(netdev, &app);
+}
+
+static u8 qlcnic_dcb_get_pfc_state(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (!test_bit(QLCNIC_DCB_STATE, &dcb->state))
+ return 0;
+
+ return dcb->cfg->type[QLC_DCB_OPER_IDX].pfc_mode_enable;
+}
+
+static u8 qlcnic_dcb_get_dcbx(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+ return 0;
+
+ return cfg->capability.dcb_capability;
+}
+
+static u8 qlcnic_dcb_get_feat_cfg(struct net_device *netdev, int fid, u8 *flag)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cee *type;
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+ return 1;
+
+ type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
+ *flag = 0;
+
+ switch (fid) {
+ case DCB_FEATCFG_ATTR_PG:
+ if (type->tc_param_valid)
+ *flag |= DCB_FEATCFG_ENABLE;
+ else
+ *flag |= DCB_FEATCFG_ERROR;
+ break;
+ case DCB_FEATCFG_ATTR_PFC:
+ if (type->pfc_mode_enable) {
+ if (type->tc_cfg[0].prio_cfg[0].pfc_type)
+ *flag |= DCB_FEATCFG_ENABLE;
+ } else {
+ *flag |= DCB_FEATCFG_ERROR;
+ }
+ break;
+ case DCB_FEATCFG_ATTR_APP:
+ *flag |= DCB_FEATCFG_ENABLE;
+ break;
+ default:
+ netdev_err(netdev, "Invalid Feature ID %d\n", fid);
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline void
+qlcnic_dcb_get_pg_tc_cfg_rx(struct net_device *netdev, int prio, u8 *prio_type,
+ u8 *pgid, u8 *bw_pct, u8 *up_map)
+{
+ *prio_type = *pgid = *bw_pct = *up_map = 0;
+}
+
+static inline void
+qlcnic_dcb_get_pg_bwg_cfg_rx(struct net_device *netdev, int pgid, u8 *bw_pct)
+{
+ *bw_pct = 0;
+}
+
+static int qlcnic_dcb_peer_app_info(struct net_device *netdev,
+ struct dcb_peer_app_info *info,
+ u16 *app_count)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cee *peer;
+ int i;
+
+ memset(info, 0, sizeof(*info));
+ *app_count = 0;
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+ return 0;
+
+ peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
+
+ for (i = 0; i < QLC_DCB_MAX_APP; i++) {
+ if (peer->app[i].valid)
+ (*app_count)++;
+ }
+
+ return 0;
+}
+
+static int qlcnic_dcb_peer_app_table(struct net_device *netdev,
+ struct dcb_app *table)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cee *peer;
+ struct qlcnic_dcb_app *app;
+ int i, j;
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+ return 0;
+
+ peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
+
+ for (i = 0, j = 0; i < QLC_DCB_MAX_APP; i++) {
+ app = &peer->app[i];
+ if (!app->valid)
+ continue;
+
+ table[j].selector = app->selector;
+ table[j].priority = app->priority;
+ table[j++].protocol = app->protocol;
+ }
+
+ return 0;
+}
+
+static int qlcnic_dcb_cee_peer_get_pg(struct net_device *netdev,
+ struct cee_pg *pg)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cee *peer;
+ u8 i, j, k, map;
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+ return 0;
+
+ peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
+
+ for (i = 0, j = 0; i < QLC_DCB_MAX_PG; i++) {
+ if (!peer->pg_cfg[i].valid)
+ continue;
+
+ pg->pg_bw[j] = peer->pg_cfg[i].total_bw_percent;
+
+ for (k = 0; k < QLC_DCB_MAX_TC; k++) {
+ if (peer->tc_cfg[i].valid &&
+ (peer->tc_cfg[i].pgid == i)) {
+ map = peer->tc_cfg[i].up_tc_map;
+ pg->prio_pg[j++] = map;
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int qlcnic_dcb_cee_peer_get_pfc(struct net_device *netdev,
+ struct cee_pfc *pfc)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+ struct qlcnic_dcb_tc_cfg *tc;
+ struct qlcnic_dcb_cee *peer;
+ u8 i, setting, prio;
+
+ pfc->pfc_en = 0;
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+ return 0;
+
+ peer = &cfg->type[QLC_DCB_PEER_IDX];
+
+ for (i = 0; i < QLC_DCB_MAX_TC; i++) {
+ tc = &peer->tc_cfg[i];
+ prio = qlcnic_dcb_prio_count(tc->up_tc_map);
+
+ setting = 0;
+ qlcnic_dcb_get_pfc_cfg(netdev, prio, &setting);
+ if (setting)
+ pfc->pfc_en |= QLC_DCB_GET_MAP(i);
+ }
+
+ pfc->tcs_supported = cfg->capability.max_pfc_tc;
+
+ return 0;
+}
+
+static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops = {
+ .getstate = qlcnic_dcb_get_state,
+ .getpermhwaddr = qlcnic_dcb_get_perm_hw_addr,
+ .getpgtccfgtx = qlcnic_dcb_get_pg_tc_cfg_tx,
+ .getpgbwgcfgtx = qlcnic_dcb_get_pg_bwg_cfg_tx,
+ .getpfccfg = qlcnic_dcb_get_pfc_cfg,
+ .getcap = qlcnic_dcb_get_capability,
+ .getnumtcs = qlcnic_dcb_get_num_tcs,
+ .getapp = qlcnic_dcb_get_app,
+ .getpfcstate = qlcnic_dcb_get_pfc_state,
+ .getdcbx = qlcnic_dcb_get_dcbx,
+ .getfeatcfg = qlcnic_dcb_get_feat_cfg,
+
+ .getpgtccfgrx = qlcnic_dcb_get_pg_tc_cfg_rx,
+ .getpgbwgcfgrx = qlcnic_dcb_get_pg_bwg_cfg_rx,
+
+ .peer_getappinfo = qlcnic_dcb_peer_app_info,
+ .peer_getapptable = qlcnic_dcb_peer_app_table,
+ .cee_peer_getpg = qlcnic_dcb_cee_peer_get_pg,
+ .cee_peer_getpfc = qlcnic_dcb_cee_peer_get_pfc,
+};
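
Editor's note: __qlcnic_dcb_attach() earlier in this file acquires its resources in order (workqueue, then cfg, then param) and unwinds them in reverse through the out_free_cfg/out_free_wq labels when a later allocation fails. The stand-alone sketch below shows the same acquire-in-order, unwind-in-reverse idiom with plain malloc() standing in for the driver's allocators; all names here are illustrative only.

#include <stdio.h>
#include <stdlib.h>

struct ctx {
	void *wq;	/* stands in for the DCB workqueue */
	void *cfg;	/* stands in for dcb->cfg */
	void *param;	/* stands in for dcb->param */
};

static int ctx_attach(struct ctx *c)
{
	int err = 0;

	c->wq = malloc(64);
	if (!c->wq)
		return -1;

	c->cfg = malloc(64);
	if (!c->cfg) {
		err = -1;
		goto out_free_wq;
	}

	c->param = malloc(64);
	if (!c->param) {
		err = -1;
		goto out_free_cfg;
	}

	return 0;		/* everything acquired */

out_free_cfg:			/* unwind in reverse order of acquisition */
	free(c->cfg);
	c->cfg = NULL;
out_free_wq:
	free(c->wq);
	c->wq = NULL;
	return err;
}

int main(void)
{
	struct ctx c = { 0 };

	if (ctx_attach(&c))
		fprintf(stderr, "attach failed\n");
	else
		printf("attach succeeded\n");

	/* normal teardown mirrors __qlcnic_dcb_free(): free everything */
	free(c.param);
	free(c.cfg);
	free(c.wq);
	return 0;
}
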
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
new file mode 100644
index 00000000000..3cf4a10fbe1
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
@@ -0,0 +1,122 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#ifndef __QLCNIC_DCBX_H
+#define __QLCNIC_DCBX_H
+
+#define QLCNIC_DCB_STATE 0
+#define QLCNIC_DCB_AEN_MODE 1
+
+#ifdef CONFIG_QLCNIC_DCB
+int qlcnic_register_dcb(struct qlcnic_adapter *);
+#else
+static inline int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+{ return 0; }
+#endif
+
+struct qlcnic_dcb;
+
+struct qlcnic_dcb_ops {
+ int (*query_hw_capability) (struct qlcnic_dcb *, char *);
+ int (*get_hw_capability) (struct qlcnic_dcb *);
+ int (*query_cee_param) (struct qlcnic_dcb *, char *, u8);
+ void (*init_dcbnl_ops) (struct qlcnic_dcb *);
+ void (*aen_handler) (struct qlcnic_dcb *, void *);
+ int (*get_cee_cfg) (struct qlcnic_dcb *);
+ void (*get_info) (struct qlcnic_dcb *);
+ int (*attach) (struct qlcnic_dcb *);
+ void (*free) (struct qlcnic_dcb *);
+};
+
+struct qlcnic_dcb {
+ struct qlcnic_dcb_mbx_params *param;
+ struct qlcnic_adapter *adapter;
+ struct delayed_work aen_work;
+ struct workqueue_struct *wq;
+ struct qlcnic_dcb_ops *ops;
+ struct qlcnic_dcb_cfg *cfg;
+ unsigned long state;
+};
+
+static inline void qlcnic_clear_dcb_ops(struct qlcnic_dcb *dcb)
+{
+ kfree(dcb);
+ dcb = NULL;
+}
+
+static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->get_hw_capability)
+ return dcb->ops->get_hw_capability(dcb);
+
+ return 0;
+}
+
+static inline void qlcnic_dcb_free(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->free)
+ dcb->ops->free(dcb);
+}
+
+static inline int qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->attach)
+ return dcb->ops->attach(dcb);
+
+ return 0;
+}
+
+static inline int
+qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf)
+{
+ if (dcb && dcb->ops->query_hw_capability)
+ return dcb->ops->query_hw_capability(dcb, buf);
+
+ return 0;
+}
+
+static inline void qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->get_info)
+ dcb->ops->get_info(dcb);
+}
+
+static inline int
+qlcnic_dcb_query_cee_param(struct qlcnic_dcb *dcb, char *buf, u8 type)
+{
+ if (dcb && dcb->ops->query_cee_param)
+ return dcb->ops->query_cee_param(dcb, buf, type);
+
+ return 0;
+}
+
+static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->get_cee_cfg)
+ return dcb->ops->get_cee_cfg(dcb);
+
+ return 0;
+}
+
+static inline void qlcnic_dcb_aen_handler(struct qlcnic_dcb *dcb, void *msg)
+{
+ if (dcb && dcb->ops->aen_handler)
+ dcb->ops->aen_handler(dcb, msg);
+}
+
+static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->init_dcbnl_ops)
+ dcb->ops->init_dcbnl_ops(dcb);
+}
+
+static inline void qlcnic_dcb_enable(struct qlcnic_dcb *dcb)
+{
+ if (dcb && qlcnic_dcb_attach(dcb))
+ qlcnic_clear_dcb_ops(dcb);
+}
+#endif
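
Editor's note: qlcnic_dcb.h exposes the DCB callbacks only through static inline wrappers that check the dcb pointer and the individual ops hook before calling it, so the rest of the driver can invoke them unconditionally even when DCB was never registered. Below is a minimal stand-alone sketch of that ops-table plus null-checked-wrapper pattern, using hypothetical names (widget, widget_ops, widget_start) rather than the driver's own.

#include <stdio.h>

/* Hypothetical ops table: one function pointer per optional hook. */
struct widget;
struct widget_ops {
	int  (*start)(struct widget *);
	void (*stop)(struct widget *);
};

struct widget {
	const struct widget_ops *ops;
	int id;
};

/* Null-checked wrappers: safe to call even if the context or a hook
 * is missing; they quietly become no-ops, like the qlcnic_dcb_* inlines. */
static inline int widget_start(struct widget *w)
{
	if (w && w->ops && w->ops->start)
		return w->ops->start(w);
	return 0;
}

static inline void widget_stop(struct widget *w)
{
	if (w && w->ops && w->ops->stop)
		w->ops->stop(w);
}

static int gen2_start(struct widget *w)
{
	printf("starting widget %d with gen2 ops\n", w->id);
	return 0;
}

static const struct widget_ops gen2_ops = { .start = gen2_start };

int main(void)
{
	struct widget w = { .ops = &gen2_ops, .id = 1 };

	widget_start(&w);	/* dispatches through the ops table */
	widget_stop(&w);	/* .stop unset: wrapper does nothing */
	widget_start(NULL);	/* no context at all: still safe */
	return 0;
}

The driver picks which table to plug in at registration time (qlcnic_set_dcb_ops() selects the 82xx or 83xx ops), which is what lets the shared DCB code stay ASIC-agnostic.
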
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 74b98110c5b..1b7f3dbae28 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1,6 +1,6 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
@@ -22,53 +22,47 @@ struct qlcnic_stats {
#define QLC_SIZEOF(m) FIELD_SIZEOF(struct qlcnic_adapter, m)
#define QLC_OFF(m) offsetof(struct qlcnic_adapter, m)
+static const u32 qlcnic_fw_dump_level[] = {
+ 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff
+};
static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
- {"xmit_called",
- QLC_SIZEOF(stats.xmitcalled), QLC_OFF(stats.xmitcalled)},
- {"xmit_finished",
- QLC_SIZEOF(stats.xmitfinished), QLC_OFF(stats.xmitfinished)},
- {"rx_dropped",
- QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
- {"tx_dropped",
- QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
- {"csummed",
- QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
- {"rx_pkts",
- QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
- {"lro_pkts",
- QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
- {"rx_bytes",
- QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
- {"tx_bytes",
- QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
- {"lrobytes",
- QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)},
- {"lso_frames",
- QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)},
- {"xmit_on",
- QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)},
- {"xmit_off",
- QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
- {"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
- QLC_OFF(stats.skb_alloc_failure)},
- {"null rxbuf",
- QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
- {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
- QLC_OFF(stats.rx_dma_map_error)},
+ {"xmit_on", QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)},
+ {"xmit_off", QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
+ {"xmit_called", QLC_SIZEOF(stats.xmitcalled),
+ QLC_OFF(stats.xmitcalled)},
+ {"xmit_finished", QLC_SIZEOF(stats.xmitfinished),
+ QLC_OFF(stats.xmitfinished)},
{"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error),
- QLC_OFF(stats.tx_dma_map_error)},
+ QLC_OFF(stats.tx_dma_map_error)},
+ {"tx_bytes", QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
+ {"tx_dropped", QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
+ {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
+ QLC_OFF(stats.rx_dma_map_error)},
+ {"rx_pkts", QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
+ {"rx_bytes", QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
+ {"rx_dropped", QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
+ {"null rxbuf", QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
+ {"csummed", QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
+ {"lro_pkts", QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
+ {"lrobytes", QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)},
+ {"lso_frames", QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)},
+ {"encap_lso_frames", QLC_SIZEOF(stats.encap_lso_frames),
+ QLC_OFF(stats.encap_lso_frames)},
+ {"encap_tx_csummed", QLC_SIZEOF(stats.encap_tx_csummed),
+ QLC_OFF(stats.encap_tx_csummed)},
+ {"encap_rx_csummed", QLC_SIZEOF(stats.encap_rx_csummed),
+ QLC_OFF(stats.encap_rx_csummed)},
+ {"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
+ QLC_OFF(stats.skb_alloc_failure)},
+ {"mac_filter_limit_overrun", QLC_SIZEOF(stats.mac_filter_limit_overrun),
+ QLC_OFF(stats.mac_filter_limit_overrun)},
+ {"spurious intr", QLC_SIZEOF(stats.spurious_intr),
+ QLC_OFF(stats.spurious_intr)},
};
static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
- "rx unicast frames",
- "rx multicast frames",
- "rx broadcast frames",
- "rx dropped frames",
- "rx errors",
- "rx local frames",
- "rx numbytes",
"tx unicast frames",
"tx multicast frames",
"tx broadcast frames",
@@ -76,9 +70,24 @@ static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
"tx errors",
"tx local frames",
"tx numbytes",
+ "rx unicast frames",
+ "rx multicast frames",
+ "rx broadcast frames",
+ "rx dropped frames",
+ "rx errors",
+ "rx local frames",
+ "rx numbytes",
};
-static const char qlcnic_mac_stats_strings [][ETH_GSTRING_LEN] = {
+static const char qlcnic_83xx_tx_stats_strings[][ETH_GSTRING_LEN] = {
+ "ctx_tx_bytes",
+ "ctx_tx_pkts",
+ "ctx_tx_errors",
+ "ctx_tx_dropped_pkts",
+ "ctx_tx_num_buffers",
+};
+
+static const char qlcnic_83xx_mac_stats_strings[][ETH_GSTRING_LEN] = {
"mac_tx_frames",
"mac_tx_bytes",
"mac_tx_mcast_pkts",
@@ -110,35 +119,99 @@ static const char qlcnic_mac_stats_strings [][ETH_GSTRING_LEN] = {
"mac_rx_length_large",
"mac_rx_jabber",
"mac_rx_dropped",
- "mac_rx_crc_error",
+ "mac_crc_error",
"mac_align_error",
+ "eswitch_frames",
+ "eswitch_bytes",
+ "eswitch_multicast_frames",
+ "eswitch_broadcast_frames",
+ "eswitch_unicast_frames",
+ "eswitch_error_free_frames",
+ "eswitch_error_free_bytes",
};
-#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
-#define QLCNIC_MAC_STATS_LEN ARRAY_SIZE(qlcnic_mac_stats_strings)
-#define QLCNIC_DEVICE_STATS_LEN ARRAY_SIZE(qlcnic_device_gstrings_stats)
-#define QLCNIC_TOTAL_STATS_LEN QLCNIC_STATS_LEN + QLCNIC_MAC_STATS_LEN
+#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
+
+static const char qlcnic_tx_queue_stats_strings[][ETH_GSTRING_LEN] = {
+ "xmit_on",
+ "xmit_off",
+ "xmit_called",
+ "xmit_finished",
+ "tx_bytes",
+};
+
+#define QLCNIC_TX_STATS_LEN ARRAY_SIZE(qlcnic_tx_queue_stats_strings)
+
+static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = {
+ "ctx_rx_bytes",
+ "ctx_rx_pkts",
+ "ctx_lro_pkt_cnt",
+ "ctx_ip_csum_error",
+ "ctx_rx_pkts_wo_ctx",
+ "ctx_rx_pkts_drop_wo_sds_on_card",
+ "ctx_rx_pkts_drop_wo_sds_on_host",
+ "ctx_rx_osized_pkts",
+ "ctx_rx_pkts_dropped_wo_rds",
+ "ctx_rx_unexpected_mcast_pkts",
+ "ctx_invalid_mac_address",
+ "ctx_rx_rds_ring_prim_attempted",
+ "ctx_rx_rds_ring_prim_success",
+ "ctx_num_lro_flows_added",
+ "ctx_num_lro_flows_removed",
+ "ctx_num_lro_flows_active",
+ "ctx_pkts_dropped_unknown",
+};
static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
"Register_Test_on_offline",
"Link_Test_on_offline",
"Interrupt_Test_offline",
"Internal_Loopback_offline",
- "External_Loopback_offline"
+ "External_Loopback_offline",
+ "EEPROM_Test_offline"
};
#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
-#define QLCNIC_RING_REGS_COUNT 20
-#define QLCNIC_RING_REGS_LEN (QLCNIC_RING_REGS_COUNT * sizeof(u32))
+static inline int qlcnic_82xx_statistics(struct qlcnic_adapter *adapter)
+{
+ return ARRAY_SIZE(qlcnic_gstrings_stats) +
+ ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
+ QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
+}
+
+static inline int qlcnic_83xx_statistics(struct qlcnic_adapter *adapter)
+{
+ return ARRAY_SIZE(qlcnic_gstrings_stats) +
+ ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) +
+ ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
+ ARRAY_SIZE(qlcnic_83xx_rx_stats_strings) +
+ QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
+}
+
+static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter)
+{
+ int len = -1;
+
+ if (qlcnic_82xx_check(adapter)) {
+ len = qlcnic_82xx_statistics(adapter);
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+ len += ARRAY_SIZE(qlcnic_device_gstrings_stats);
+ } else if (qlcnic_83xx_check(adapter)) {
+ len = qlcnic_83xx_statistics(adapter);
+ }
+
+ return len;
+}
+
+#define QLCNIC_TX_INTR_NOT_CONFIGURED 0X78563412
+
#define QLCNIC_MAX_EEPROM_LEN 1024
static const u32 diag_registers[] = {
- CRB_CMDPEG_STATE,
- CRB_RCVPEG_STATE,
- CRB_XG_STATE_P3P,
- CRB_FW_CAPABILITIES_1,
- ISR_INT_STATE_REG,
+ QLCNIC_CMDPEG_STATE,
+ QLCNIC_RCVPEG_STATE,
+ QLCNIC_FW_CAPABILITIES,
QLCNIC_CRB_DRV_ACTIVE,
QLCNIC_CRB_DEV_STATE,
QLCNIC_CRB_DRV_STATE,
@@ -148,6 +221,13 @@ static const u32 diag_registers[] = {
QLCNIC_PEG_ALIVE_COUNTER,
QLCNIC_PEG_HALT_STATUS1,
QLCNIC_PEG_HALT_STATUS2,
+ -1
+};
+
+
+static const u32 ext_diag_registers[] = {
+ CRB_XG_STATE_P3P,
+ ISR_INT_STATE_REG,
QLCNIC_CRB_PEG_NET_0+0x3c,
QLCNIC_CRB_PEG_NET_1+0x3c,
QLCNIC_CRB_PEG_NET_2+0x3c,
@@ -155,13 +235,30 @@ static const u32 diag_registers[] = {
-1
};
-#define QLCNIC_MGMT_API_VERSION 2
-#define QLCNIC_DEV_INFO_SIZE 1
-#define QLCNIC_ETHTOOL_REGS_VER 2
+#define QLCNIC_MGMT_API_VERSION 3
+#define QLCNIC_ETHTOOL_REGS_VER 4
+
+static inline int qlcnic_get_ring_regs_len(struct qlcnic_adapter *adapter)
+{
+ int ring_regs_cnt = (adapter->drv_tx_rings * 5) +
+ (adapter->max_rds_rings * 2) +
+ (adapter->drv_sds_rings * 3) + 5;
+ return ring_regs_cnt * sizeof(u32);
+}
+
static int qlcnic_get_regs_len(struct net_device *dev)
{
- return sizeof(diag_registers) + QLCNIC_RING_REGS_LEN +
- QLCNIC_DEV_INFO_SIZE + 1;
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 len;
+
+ if (qlcnic_83xx_check(adapter))
+ len = qlcnic_83xx_get_regs_len(adapter);
+ else
+ len = sizeof(ext_diag_registers) + sizeof(diag_registers);
+
+ len += ((QLCNIC_DEV_INFO_SIZE + 2) * sizeof(u32));
+ len += qlcnic_get_ring_regs_len(adapter);
+ return len;
}
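
Editor's note: as a worked example with hypothetical ring counts, 4 driver Tx rings, 2 RDS rings and 4 driver SDS rings give qlcnic_get_ring_regs_len() = (4*5 + 2*2 + 4*3 + 5) * sizeof(u32) = 41 * 4 = 164 bytes, which qlcnic_get_regs_len() then adds on top of the register tables and the (QLCNIC_DEV_INFO_SIZE + 2) device-info words.
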
static int qlcnic_get_eeprom_len(struct net_device *dev)
@@ -174,10 +271,9 @@ qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 fw_major, fw_minor, fw_build;
-
- fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
- fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
- fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
+ fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d", fw_major, fw_minor, fw_build);
@@ -188,11 +284,13 @@ qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
sizeof(drvinfo->version));
}
-static int
-qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
+ struct ethtool_cmd *ecmd)
{
- struct qlcnic_adapter *adapter = netdev_priv(dev);
- int check_sfp_module = 0;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 speed, reg;
+ int check_sfp_module = 0, err = 0;
+ u16 pcifn = ahw->pci_func;
/* read which mode */
if (adapter->ahw->port_type == QLCNIC_GBE) {
@@ -213,9 +311,9 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
ecmd->autoneg = adapter->ahw->link_autoneg;
} else if (adapter->ahw->port_type == QLCNIC_XGBE) {
- u32 val;
+ u32 val = 0;
+ val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR, &err);
- val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
if (val == QLCNIC_PORT_MODE_802_3_AP) {
ecmd->supported = SUPPORTED_1000baseT_Full;
ecmd->advertising = ADVERTISED_1000baseT_Full;
@@ -224,10 +322,17 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
ecmd->advertising = ADVERTISED_10000baseT_Full;
}
- if (netif_running(dev) && adapter->ahw->has_link_events) {
- ethtool_cmd_speed_set(ecmd, adapter->ahw->link_speed);
- ecmd->autoneg = adapter->ahw->link_autoneg;
- ecmd->duplex = adapter->ahw->link_duplex;
+ if (netif_running(adapter->netdev) && ahw->has_link_events) {
+ if (ahw->linkup) {
+ reg = QLCRD32(adapter,
+ P3P_LINK_SPEED_REG(pcifn), &err);
+ speed = P3P_LINK_SPEED_VAL(pcifn, reg);
+ ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
+ }
+
+ ethtool_cmd_speed_set(ecmd, ahw->link_speed);
+ ecmd->autoneg = ahw->link_autoneg;
+ ecmd->duplex = ahw->link_duplex;
goto skip;
}
@@ -269,8 +374,8 @@ skip:
case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
ecmd->advertising |= ADVERTISED_TP;
ecmd->supported |= SUPPORTED_TP;
- check_sfp_module = netif_running(dev) &&
- adapter->ahw->has_link_events;
+ check_sfp_module = netif_running(adapter->netdev) &&
+ ahw->has_link_events;
case QLCNIC_BRDTYPE_P3P_10G_XFP:
ecmd->supported |= SUPPORTED_FIBRE;
ecmd->advertising |= ADVERTISED_FIBRE;
@@ -284,8 +389,8 @@ skip:
ecmd->advertising |=
(ADVERTISED_FIBRE | ADVERTISED_TP);
ecmd->port = PORT_FIBRE;
- check_sfp_module = netif_running(dev) &&
- adapter->ahw->has_link_events;
+ check_sfp_module = netif_running(adapter->netdev) &&
+ ahw->has_link_events;
} else {
ecmd->autoneg = AUTONEG_ENABLE;
ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
@@ -321,16 +426,24 @@ skip:
return 0;
}
-static int
-qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int qlcnic_get_settings(struct net_device *dev,
+ struct ethtool_cmd *ecmd)
{
- u32 config = 0;
- u32 ret = 0;
struct qlcnic_adapter *adapter = netdev_priv(dev);
- if (adapter->ahw->port_type != QLCNIC_GBE)
- return -EOPNOTSUPP;
+ if (qlcnic_82xx_check(adapter))
+ return qlcnic_82xx_get_settings(adapter, ecmd);
+ else if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_get_settings(adapter, ecmd);
+
+ return -EIO;
+}
+
+static int qlcnic_set_port_config(struct qlcnic_adapter *adapter,
+ struct ethtool_cmd *ecmd)
+{
+ u32 ret = 0, config = 0;
/* read which mode */
if (ecmd->duplex)
config |= 0x1;
@@ -358,6 +471,24 @@ qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
return -EOPNOTSUPP;
else if (ret)
return -EIO;
+ return ret;
+}
+
+static int qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ u32 ret = 0;
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ if (adapter->ahw->port_type != QLCNIC_GBE)
+ return -EOPNOTSUPP;
+
+ if (qlcnic_83xx_check(adapter))
+ ret = qlcnic_83xx_set_settings(adapter, ecmd);
+ else
+ ret = qlcnic_set_port_config(adapter, ecmd);
+
+ if (!ret)
+ return ret;
adapter->ahw->link_speed = ethtool_cmd_speed(ecmd);
adapter->ahw->link_duplex = ecmd->duplex;
@@ -370,52 +501,95 @@ qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
return dev->netdev_ops->ndo_open(dev);
}
+static int qlcnic_82xx_get_registers(struct qlcnic_adapter *adapter,
+ u32 *regs_buff)
+{
+ int i, j = 0, err = 0;
+
+ for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++)
+ regs_buff[i] = QLC_SHARED_REG_RD32(adapter, diag_registers[j]);
+ j = 0;
+ while (ext_diag_registers[j] != -1)
+ regs_buff[i++] = QLCRD32(adapter, ext_diag_registers[j++],
+ &err);
+ return i;
+}
+
static void
qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_rds_ring *rds_rings;
+ struct qlcnic_host_tx_ring *tx_ring;
u32 *regs_buff = p;
- int ring, i = 0, j = 0;
+ int ring, i = 0;
memset(p, 0, qlcnic_get_regs_len(dev));
+
regs->version = (QLCNIC_ETHTOOL_REGS_VER << 24) |
(adapter->ahw->revision_id << 16) | (adapter->pdev)->device;
regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff));
regs_buff[1] = QLCNIC_MGMT_API_VERSION;
- for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++)
- regs_buff[i] = QLCRD32(adapter, diag_registers[j]);
+ if (adapter->ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
+ regs_buff[2] = adapter->ahw->max_vnic_func;
+
+ if (qlcnic_82xx_check(adapter))
+ i = qlcnic_82xx_get_registers(adapter, regs_buff);
+ else
+ i = qlcnic_83xx_get_registers(adapter, regs_buff);
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
return;
- regs_buff[i++] = 0xFFEFCDAB; /* Marker btw regs and ring count*/
-
- regs_buff[i++] = 1; /* No. of tx ring */
- regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
- regs_buff[i++] = readl(adapter->tx_ring->crb_cmd_producer);
-
- regs_buff[i++] = 2; /* No. of rx ring */
- regs_buff[i++] = readl(recv_ctx->rds_rings[0].crb_rcv_producer);
- regs_buff[i++] = readl(recv_ctx->rds_rings[1].crb_rcv_producer);
+ /* Marker btw regs and TX ring count */
+ regs_buff[i++] = 0xFFEFCDAB;
+
+ regs_buff[i++] = adapter->drv_tx_rings; /* No. of TX ring */
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ regs_buff[i++] = le32_to_cpu(*(tx_ring->hw_consumer));
+ regs_buff[i++] = tx_ring->sw_consumer;
+ regs_buff[i++] = readl(tx_ring->crb_cmd_producer);
+ regs_buff[i++] = tx_ring->producer;
+ if (tx_ring->crb_intr_mask)
+ regs_buff[i++] = readl(tx_ring->crb_intr_mask);
+ else
+ regs_buff[i++] = QLCNIC_TX_INTR_NOT_CONFIGURED;
+ }
- regs_buff[i++] = adapter->max_sds_rings;
+ regs_buff[i++] = adapter->max_rds_rings; /* No. of RX ring */
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_rings = &recv_ctx->rds_rings[ring];
+ regs_buff[i++] = readl(rds_rings->crb_rcv_producer);
+ regs_buff[i++] = rds_rings->producer;
+ }
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ regs_buff[i++] = adapter->drv_sds_rings; /* No. of SDS ring */
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &(recv_ctx->sds_rings[ring]);
regs_buff[i++] = readl(sds_ring->crb_sts_consumer);
+ regs_buff[i++] = sds_ring->consumer;
+ regs_buff[i++] = readl(sds_ring->crb_intr_mask);
}
}
static u32 qlcnic_test_link(struct net_device *dev)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int err = 0;
u32 val;
- val = QLCRD32(adapter, CRB_XG_STATE_P3P);
+ if (qlcnic_83xx_check(adapter)) {
+ val = qlcnic_83xx_test_link(adapter);
+ return (val & 1) ? 0 : 1;
+ }
+ val = QLCRD32(adapter, CRB_XG_STATE_P3P, &err);
+ if (err == -EIO)
+ return err;
val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val);
return (val == XG_LINK_UP_P3P) ? 0 : 1;
}
@@ -426,8 +600,10 @@ qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
int offset;
- int ret;
+ int ret = -1;
+ if (qlcnic_83xx_check(adapter))
+ return 0;
if (eeprom->len == 0)
return -EINVAL;
@@ -435,8 +611,9 @@ qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
((adapter->pdev)->device << 16);
offset = eeprom->offset;
- ret = qlcnic_rom_fast_read_words(adapter, offset, bytes,
- eeprom->len);
+ if (qlcnic_82xx_check(adapter))
+ ret = qlcnic_rom_fast_read_words(adapter, offset, bytes,
+ eeprom->len);
if (ret < 0)
return ret;
@@ -505,37 +682,91 @@ qlcnic_set_ringparam(struct net_device *dev,
return qlcnic_reset_context(adapter);
}
+static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter,
+ u8 rx_ring, u8 tx_ring)
+{
+ if (rx_ring == 0 || tx_ring == 0)
+ return -EINVAL;
+
+ if (rx_ring != 0) {
+ if (rx_ring > adapter->max_sds_rings) {
+ netdev_err(adapter->netdev,
+ "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n",
+ rx_ring, adapter->max_sds_rings);
+ return -EINVAL;
+ }
+ }
+
+ if (tx_ring != 0) {
+ if (tx_ring > adapter->max_tx_rings) {
+ netdev_err(adapter->netdev,
+ "Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n",
+ tx_ring, adapter->max_tx_rings);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static void qlcnic_get_channels(struct net_device *dev,
struct ethtool_channels *channel)
{
- int min;
struct qlcnic_adapter *adapter = netdev_priv(dev);
- min = min_t(int, adapter->ahw->max_rx_ques, num_online_cpus());
- channel->max_rx = rounddown_pow_of_two(min);
- channel->max_tx = adapter->ahw->max_tx_ques;
-
- channel->rx_count = adapter->max_sds_rings;
- channel->tx_count = adapter->ahw->max_tx_ques;
+ channel->max_rx = adapter->max_sds_rings;
+ channel->max_tx = adapter->max_tx_rings;
+ channel->rx_count = adapter->drv_sds_rings;
+ channel->tx_count = adapter->drv_tx_rings;
}
static int qlcnic_set_channels(struct net_device *dev,
- struct ethtool_channels *channel)
+ struct ethtool_channels *channel)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
int err;
- if (channel->other_count || channel->combined_count ||
- channel->tx_count != channel->max_tx)
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ netdev_err(dev, "No RSS/TSS support in non MSI-X mode\n");
+ return -EINVAL;
+ }
+
+ if (channel->other_count || channel->combined_count)
return -EINVAL;
- err = qlcnic_validate_max_rss(dev, channel->max_rx, channel->rx_count);
+ err = qlcnic_validate_ring_count(adapter, channel->rx_count,
+ channel->tx_count);
if (err)
return err;
- err = qlcnic_set_max_rss(adapter, channel->rx_count);
- netdev_info(dev, "allocated 0x%x sds rings\n",
- adapter->max_sds_rings);
+ if (adapter->drv_sds_rings != channel->rx_count) {
+ err = qlcnic_validate_rings(adapter, channel->rx_count,
+ QLCNIC_RX_QUEUE);
+ if (err) {
+ netdev_err(dev, "Unable to configure %u SDS rings\n",
+ channel->rx_count);
+ return err;
+ }
+ adapter->drv_rss_rings = channel->rx_count;
+ }
+
+ if (adapter->drv_tx_rings != channel->tx_count) {
+ err = qlcnic_validate_rings(adapter, channel->tx_count,
+ QLCNIC_TX_QUEUE);
+ if (err) {
+ netdev_err(dev, "Unable to configure %u Tx rings\n",
+ channel->tx_count);
+ return err;
+ }
+ adapter->drv_tss_rings = channel->tx_count;
+ }
+
+ adapter->flags |= QLCNIC_TSS_RSS;
+
+ err = qlcnic_setup_rings(adapter);
+ netdev_info(dev, "Allocated %d SDS rings and %d Tx rings\n",
+ adapter->drv_sds_rings, adapter->drv_tx_rings);
+
return err;
}
@@ -545,15 +776,24 @@ qlcnic_get_pauseparam(struct net_device *netdev,
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int port = adapter->ahw->physical_port;
+ int err = 0;
__u32 val;
+ if (qlcnic_83xx_check(adapter)) {
+ qlcnic_83xx_get_pauseparam(adapter, pause);
+ return;
+ }
if (adapter->ahw->port_type == QLCNIC_GBE) {
if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
return;
/* get flow control settings */
- val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), &err);
+ if (err == -EIO)
+ return;
pause->rx_pause = qlcnic_gb_get_rx_flowctl(val);
- val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, &err);
+ if (err == -EIO)
+ return;
switch (port) {
case 0:
pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val));
@@ -573,7 +813,9 @@ qlcnic_get_pauseparam(struct net_device *netdev,
if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
return;
pause->rx_pause = 1;
- val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
+ val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, &err);
+ if (err == -EIO)
+ return;
if (port == 0)
pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val));
else
@@ -590,14 +832,20 @@ qlcnic_set_pauseparam(struct net_device *netdev,
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int port = adapter->ahw->physical_port;
+ int err = 0;
__u32 val;
+ if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_set_pauseparam(adapter, pause);
+
/* read mode */
if (adapter->ahw->port_type == QLCNIC_GBE) {
if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
return -EIO;
/* set flow control */
- val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), &err);
+ if (err == -EIO)
+ return err;
if (pause->rx_pause)
qlcnic_gb_rx_flowctl(val);
@@ -606,8 +854,11 @@ qlcnic_set_pauseparam(struct net_device *netdev,
QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port),
val);
+ QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), val);
/* set autoneg */
- val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, &err);
+ if (err == -EIO)
+ return err;
switch (port) {
case 0:
if (pause->tx_pause)
@@ -643,7 +894,9 @@ qlcnic_set_pauseparam(struct net_device *netdev,
if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
return -EIO;
- val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
+ val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, &err);
+ if (err == -EIO)
+ return err;
if (port == 0) {
if (pause->tx_pause)
qlcnic_xg_unset_xg0_mask(val);
@@ -667,24 +920,39 @@ static int qlcnic_reg_test(struct net_device *dev)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 data_read;
+ int err = 0;
+
+ if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_reg_test(adapter);
- data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0));
+ data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0), &err);
+ if (err == -EIO)
+ return err;
if ((data_read & 0xffff) != adapter->pdev->vendor)
return 1;
return 0;
}
+static int qlcnic_eeprom_test(struct net_device *dev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ if (qlcnic_82xx_check(adapter))
+ return 0;
+
+ return qlcnic_83xx_flash_test(adapter);
+}
+
static int qlcnic_get_sset_count(struct net_device *dev, int sset)
{
+
struct qlcnic_adapter *adapter = netdev_priv(dev);
switch (sset) {
case ETH_SS_TEST:
return QLCNIC_TEST_LEN;
case ETH_SS_STATS:
- if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
- return QLCNIC_TOTAL_STATS_LEN + QLCNIC_DEVICE_STATS_LEN;
- return QLCNIC_TOTAL_STATS_LEN;
+ return qlcnic_dev_statistics_len(adapter);
default:
return -EOPNOTSUPP;
}
@@ -693,43 +961,53 @@ static int qlcnic_get_sset_count(struct net_device *dev, int sset)
static int qlcnic_irq_test(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- int max_sds_rings = adapter->max_sds_rings;
- int ret;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_cmd_args cmd;
+ int ret, drv_sds_rings = adapter->drv_sds_rings;
+ int drv_tx_rings = adapter->drv_tx_rings;
+
+ if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_interrupt_test(netdev);
if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
return -EIO;
ret = qlcnic_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
if (ret)
- goto clear_it;
+ goto clear_diag_irq;
- adapter->ahw->diag_cnt = 0;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_INTRPT_TEST;
- cmd.req.arg1 = adapter->ahw->pci_func;
- qlcnic_issue_cmd(adapter, &cmd);
- ret = cmd.rsp.cmd;
+ ahw->diag_cnt = 0;
+ ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
+ if (ret)
+ goto free_diag_res;
+ cmd.req.arg[1] = ahw->pci_func;
+ ret = qlcnic_issue_cmd(adapter, &cmd);
if (ret)
goto done;
- msleep(10);
-
- ret = !adapter->ahw->diag_cnt;
+ usleep_range(1000, 12000);
+ ret = !ahw->diag_cnt;
done:
- qlcnic_diag_free_res(netdev, max_sds_rings);
+ qlcnic_free_mbx_args(&cmd);
+
+free_diag_res:
+ qlcnic_diag_free_res(netdev, drv_sds_rings);
-clear_it:
- adapter->max_sds_rings = max_sds_rings;
+clear_diag_irq:
+ adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
return ret;
}
-#define QLCNIC_ILB_PKT_SIZE 64
-#define QLCNIC_NUM_ILB_PKT 16
-#define QLCNIC_ILB_MAX_RCV_LOOP 10
+#define QLCNIC_ILB_PKT_SIZE 64
+#define QLCNIC_NUM_ILB_PKT 16
+#define QLCNIC_ILB_MAX_RCV_LOOP 10
+#define QLCNIC_LB_PKT_POLL_DELAY_MSEC 1
+#define QLCNIC_LB_PKT_POLL_COUNT 20
static void qlcnic_create_loopback_buff(unsigned char *data, u8 mac[])
{
@@ -750,7 +1028,7 @@ int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[])
return memcmp(data, buff, QLCNIC_ILB_PKT_SIZE);
}
-static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
+int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
{
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
@@ -761,33 +1039,32 @@ static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
skb_put(skb, QLCNIC_ILB_PKT_SIZE);
-
adapter->ahw->diag_cnt = 0;
qlcnic_xmit_frame(skb, adapter->netdev);
-
loop = 0;
+
do {
- msleep(1);
+ msleep(QLCNIC_LB_PKT_POLL_DELAY_MSEC);
qlcnic_process_rcv_ring_diag(sds_ring);
- if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP)
+ if (loop++ > QLCNIC_LB_PKT_POLL_COUNT)
break;
} while (!adapter->ahw->diag_cnt);
dev_kfree_skb_any(skb);
if (!adapter->ahw->diag_cnt)
- QLCDB(adapter, DRV,
- "LB Test: packet #%d was not received\n", i + 1);
+ dev_warn(&adapter->pdev->dev,
+ "LB Test: packet #%d was not received\n",
+ i + 1);
else
cnt++;
}
if (cnt != i) {
- dev_warn(&adapter->pdev->dev, "LB Test failed\n");
- if (mode != QLCNIC_ILB_MODE) {
+ dev_err(&adapter->pdev->dev,
+ "LB Test: failed, TX[%d], RX[%d]\n", i, cnt);
+ if (mode != QLCNIC_ILB_MODE)
dev_warn(&adapter->pdev->dev,
- "WARNING: Please make sure external"
- "loopback connector is plugged in\n");
- }
+ "WARNING: Please check loopback cable\n");
return -1;
}
return 0;
@@ -796,22 +1073,27 @@ static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- int max_sds_rings = adapter->max_sds_rings;
+ int drv_tx_rings = adapter->drv_tx_rings;
+ int drv_sds_rings = adapter->drv_sds_rings;
struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
int loop = 0;
int ret;
- if (!(adapter->ahw->capabilities &
- QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) {
- netdev_info(netdev, "Firmware is not loopback test capable\n");
+ if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_loopback_test(netdev, mode);
+
+ if (!(ahw->capabilities & QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) {
+ dev_info(&adapter->pdev->dev,
+ "Firmware do not support loopback test\n");
return -EOPNOTSUPP;
}
- QLCDB(adapter, DRV, "%s loopback test in progress\n",
- mode == QLCNIC_ILB_MODE ? "internal" : "external");
- if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
- netdev_warn(netdev, "Loopback test not supported for non "
- "privilege function\n");
+ dev_warn(&adapter->pdev->dev, "%s loopback test in progress\n",
+ mode == QLCNIC_ILB_MODE ? "internal" : "external");
+ if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ dev_warn(&adapter->pdev->dev,
+ "Loopback test not supported in nonprivileged mode\n");
return 0;
}
@@ -823,35 +1105,35 @@ static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
goto clear_it;
sds_ring = &adapter->recv_ctx->sds_rings[0];
-
ret = qlcnic_set_lb_mode(adapter, mode);
if (ret)
goto free_res;
- adapter->ahw->diag_cnt = 0;
+ ahw->diag_cnt = 0;
do {
msleep(500);
qlcnic_process_rcv_ring_diag(sds_ring);
if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
- netdev_info(netdev, "firmware didnt respond to loopback"
- " configure request\n");
- ret = -QLCNIC_FW_NOT_RESPOND;
+ netdev_info(netdev,
+ "Firmware didn't sent link up event to loopback request\n");
+ ret = -ETIMEDOUT;
goto free_res;
} else if (adapter->ahw->diag_cnt) {
ret = adapter->ahw->diag_cnt;
goto free_res;
}
- } while (!QLCNIC_IS_LB_CONFIGURED(adapter->ahw->loopback_state));
+ } while (!QLCNIC_IS_LB_CONFIGURED(ahw->loopback_state));
ret = qlcnic_do_lb_test(adapter, mode);
- qlcnic_clear_lb_mode(adapter);
+ qlcnic_clear_lb_mode(adapter, mode);
free_res:
- qlcnic_diag_free_res(netdev, max_sds_rings);
+ qlcnic_diag_free_res(netdev, drv_sds_rings);
clear_it:
- adapter->max_sds_rings = max_sds_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return ret;
}
@@ -878,20 +1160,25 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
data[3] = qlcnic_loopback_test(dev, QLCNIC_ILB_MODE);
if (data[3])
eth_test->flags |= ETH_TEST_FL_FAILED;
+
if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) {
data[4] = qlcnic_loopback_test(dev, QLCNIC_ELB_MODE);
if (data[4])
eth_test->flags |= ETH_TEST_FL_FAILED;
eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
}
+
+ data[5] = qlcnic_eeprom_test(dev);
+ if (data[5])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
}
}
static void
-qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
- int index, i, j;
+ int index, i, num_stats;
switch (stringset) {
case ETH_SS_TEST:
@@ -899,19 +1186,49 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
+ num_stats = ARRAY_SIZE(qlcnic_tx_queue_stats_strings);
+ for (i = 0; i < adapter->drv_tx_rings; i++) {
+ for (index = 0; index < num_stats; index++) {
+ sprintf(data, "tx_queue_%d %s", i,
+ qlcnic_tx_queue_stats_strings[index]);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
for (index = 0; index < QLCNIC_STATS_LEN; index++) {
memcpy(data + index * ETH_GSTRING_LEN,
qlcnic_gstrings_stats[index].stat_string,
ETH_GSTRING_LEN);
}
- for (j = 0; j < QLCNIC_MAC_STATS_LEN; index++, j++) {
- memcpy(data + index * ETH_GSTRING_LEN,
- qlcnic_mac_stats_strings[j],
- ETH_GSTRING_LEN);
+
+ if (qlcnic_83xx_check(adapter)) {
+ num_stats = ARRAY_SIZE(qlcnic_83xx_tx_stats_strings);
+ for (i = 0; i < num_stats; i++, index++)
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_83xx_tx_stats_strings[i],
+ ETH_GSTRING_LEN);
+ num_stats = ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
+ for (i = 0; i < num_stats; i++, index++)
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_83xx_mac_stats_strings[i],
+ ETH_GSTRING_LEN);
+ num_stats = ARRAY_SIZE(qlcnic_83xx_rx_stats_strings);
+ for (i = 0; i < num_stats; i++, index++)
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_83xx_rx_stats_strings[i],
+ ETH_GSTRING_LEN);
+ return;
+ } else {
+ num_stats = ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
+ for (i = 0; i < num_stats; i++, index++)
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_83xx_mac_stats_strings[i],
+ ETH_GSTRING_LEN);
}
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
return;
- for (i = 0; i < QLCNIC_DEVICE_STATS_LEN; index++, i++) {
+ num_stats = ARRAY_SIZE(qlcnic_device_gstrings_stats);
+ for (i = 0; i < num_stats; index++, i++) {
memcpy(data + index * ETH_GSTRING_LEN,
qlcnic_device_gstrings_stats[i],
ETH_GSTRING_LEN);
@@ -919,90 +1236,127 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
}
}
-static void
-qlcnic_fill_stats(int *index, u64 *data, void *stats, int type)
+static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type)
{
- int ind = *index;
-
if (type == QLCNIC_MAC_STATS) {
struct qlcnic_mac_statistics *mac_stats =
(struct qlcnic_mac_statistics *)stats;
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_frames);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_bytes);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_mcast_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_bcast_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_pause_cnt);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_ctrl_pkt);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_64b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_127b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_255b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_511b_pkts);
- data[ind++] =
- QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1023b_pkts);
- data[ind++] =
- QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1518b_pkts);
- data[ind++] =
- QLCNIC_FILL_STATS(mac_stats->mac_tx_gt_1518b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_frames);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_bytes);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_mcast_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_bcast_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_pause_cnt);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_ctrl_pkt);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_64b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_127b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_255b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_511b_pkts);
- data[ind++] =
- QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1023b_pkts);
- data[ind++] =
- QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1518b_pkts);
- data[ind++] =
- QLCNIC_FILL_STATS(mac_stats->mac_rx_gt_1518b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_error);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_small);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_large);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_jabber);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_dropped);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_crc_error);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_align_error);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_frames);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_bytes);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_mcast_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_bcast_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_pause_cnt);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_ctrl_pkt);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_64b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_127b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_255b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_511b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1023b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1518b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_gt_1518b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_frames);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_bytes);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_mcast_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_bcast_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_pause_cnt);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_ctrl_pkt);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_64b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_127b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_255b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_511b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1023b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1518b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_gt_1518b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_error);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_small);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_large);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_jabber);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_dropped);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_crc_error);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_align_error);
} else if (type == QLCNIC_ESW_STATS) {
struct __qlcnic_esw_statistics *esw_stats =
(struct __qlcnic_esw_statistics *)stats;
- data[ind++] = QLCNIC_FILL_STATS(esw_stats->unicast_frames);
- data[ind++] = QLCNIC_FILL_STATS(esw_stats->multicast_frames);
- data[ind++] = QLCNIC_FILL_STATS(esw_stats->broadcast_frames);
- data[ind++] = QLCNIC_FILL_STATS(esw_stats->dropped_frames);
- data[ind++] = QLCNIC_FILL_STATS(esw_stats->errors);
- data[ind++] = QLCNIC_FILL_STATS(esw_stats->local_frames);
- data[ind++] = QLCNIC_FILL_STATS(esw_stats->numbytes);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->unicast_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->multicast_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->broadcast_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->dropped_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->errors);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->local_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->numbytes);
+ }
+ return data;
+}
+
+void qlcnic_update_stats(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
+
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ adapter->stats.xmit_on += tx_ring->tx_stats.xmit_on;
+ adapter->stats.xmit_off += tx_ring->tx_stats.xmit_off;
+ adapter->stats.xmitcalled += tx_ring->tx_stats.xmit_called;
+ adapter->stats.xmitfinished += tx_ring->tx_stats.xmit_finished;
+ adapter->stats.txbytes += tx_ring->tx_stats.tx_bytes;
}
+}
+
+static u64 *qlcnic_fill_tx_queue_stats(u64 *data, void *stats)
+{
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ tx_ring = (struct qlcnic_host_tx_ring *)stats;
- *index = ind;
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_on);
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_off);
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_called);
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_finished);
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.tx_bytes);
+
+ return data;
}
-static void
-qlcnic_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 * data)
+static void qlcnic_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
+ struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_esw_statistics port_stats;
struct qlcnic_mac_statistics mac_stats;
- int index, ret;
-
- for (index = 0; index < QLCNIC_STATS_LEN; index++) {
- char *p =
- (char *)adapter +
- qlcnic_gstrings_stats[index].stat_offset;
- data[index] =
- (qlcnic_gstrings_stats[index].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p:(*(u32 *)p);
+ int index, ret, length, size, tx_size, ring;
+ char *p;
+
+ tx_size = adapter->drv_tx_rings * QLCNIC_TX_STATS_LEN;
+
+ memset(data, 0, tx_size * sizeof(u64));
+ for (ring = 0, index = 0; ring < adapter->drv_tx_rings; ring++) {
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ tx_ring = &adapter->tx_ring[ring];
+ data = qlcnic_fill_tx_queue_stats(data, tx_ring);
+ qlcnic_update_stats(adapter);
+ }
+ }
+
+ memset(data, 0, stats->n_stats * sizeof(u64));
+ length = QLCNIC_STATS_LEN;
+ for (index = 0; index < length; index++) {
+ p = (char *)adapter + qlcnic_gstrings_stats[index].stat_offset;
+ size = qlcnic_gstrings_stats[index].sizeof_stat;
+ *data++ = (size == sizeof(u64)) ? (*(u64 *)p) : ((*(u32 *)p));
}
- /* Retrieve MAC statistics from firmware */
- memset(&mac_stats, 0, sizeof(struct qlcnic_mac_statistics));
- qlcnic_get_mac_stats(adapter, &mac_stats);
- qlcnic_fill_stats(&index, data, &mac_stats, QLCNIC_MAC_STATS);
+ if (qlcnic_83xx_check(adapter)) {
+ if (adapter->ahw->linkup)
+ qlcnic_83xx_get_stats(adapter, data);
+ return;
+ } else {
+ /* Retrieve MAC statistics from firmware */
+ memset(&mac_stats, 0, sizeof(struct qlcnic_mac_statistics));
+ qlcnic_get_mac_stats(adapter, &mac_stats);
+ data = qlcnic_fill_stats(data, &mac_stats, QLCNIC_MAC_STATS);
+ }
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
return;
@@ -1013,23 +1367,25 @@ qlcnic_get_ethtool_stats(struct net_device *dev,
if (ret)
return;
- qlcnic_fill_stats(&index, data, &port_stats.rx, QLCNIC_ESW_STATS);
-
+ data = qlcnic_fill_stats(data, &port_stats.rx, QLCNIC_ESW_STATS);
ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
QLCNIC_QUERY_TX_COUNTER, &port_stats.tx);
if (ret)
return;
- qlcnic_fill_stats(&index, data, &port_stats.tx, QLCNIC_ESW_STATS);
+ qlcnic_fill_stats(data, &port_stats.tx, QLCNIC_ESW_STATS);
}
static int qlcnic_set_led(struct net_device *dev,
enum ethtool_phys_id_state state)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
- int max_sds_rings = adapter->max_sds_rings;
+ int drv_sds_rings = adapter->drv_sds_rings;
int err = -EIO, active = 1;
+ if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_set_led(dev, state);
+
if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
netdev_warn(dev, "LED test not supported for non "
"privilege function\n");
@@ -1082,7 +1438,7 @@ static int qlcnic_set_led(struct net_device *dev,
}
if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
- qlcnic_diag_free_res(dev, max_sds_rings);
+ qlcnic_diag_free_res(dev, drv_sds_rings);
if (!active || err)
clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
@@ -1095,15 +1451,20 @@ qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 wol_cfg;
+ int err = 0;
+ if (qlcnic_83xx_check(adapter))
+ return;
wol->supported = 0;
wol->wolopts = 0;
- wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err);
+ if (err == -EIO)
+ return;
if (wol_cfg & (1UL << adapter->portnum))
wol->supported |= WAKE_MAGIC;
- wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err);
if (wol_cfg & (1UL << adapter->portnum))
wol->wolopts |= WAKE_MAGIC;
}
@@ -1113,15 +1474,22 @@ qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 wol_cfg;
+ int err = 0;
- if (wol->wolopts & ~WAKE_MAGIC)
+ if (qlcnic_83xx_check(adapter))
return -EOPNOTSUPP;
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
- wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err);
+ if (err == -EIO)
+ return err;
if (!(wol_cfg & (1 << adapter->portnum)))
return -EOPNOTSUPP;
- wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err);
+ if (err == -EIO)
+ return err;
if (wol->wolopts & WAKE_MAGIC)
wol_cfg |= 1UL << adapter->portnum;
else
@@ -1141,6 +1509,7 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ethcoal)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err;
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
return -EINVAL;
@@ -1150,45 +1519,31 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev,
* unsupported parameters are set.
*/
if (ethcoal->rx_coalesce_usecs > 0xffff ||
- ethcoal->rx_max_coalesced_frames > 0xffff ||
- ethcoal->tx_coalesce_usecs ||
- ethcoal->tx_max_coalesced_frames ||
- ethcoal->rx_coalesce_usecs_irq ||
- ethcoal->rx_max_coalesced_frames_irq ||
- ethcoal->tx_coalesce_usecs_irq ||
- ethcoal->tx_max_coalesced_frames_irq ||
- ethcoal->stats_block_coalesce_usecs ||
- ethcoal->use_adaptive_rx_coalesce ||
- ethcoal->use_adaptive_tx_coalesce ||
- ethcoal->pkt_rate_low ||
- ethcoal->rx_coalesce_usecs_low ||
- ethcoal->rx_max_coalesced_frames_low ||
- ethcoal->tx_coalesce_usecs_low ||
- ethcoal->tx_max_coalesced_frames_low ||
- ethcoal->pkt_rate_high ||
- ethcoal->rx_coalesce_usecs_high ||
- ethcoal->rx_max_coalesced_frames_high ||
- ethcoal->tx_coalesce_usecs_high ||
- ethcoal->tx_max_coalesced_frames_high)
+ ethcoal->rx_max_coalesced_frames > 0xffff ||
+ ethcoal->tx_coalesce_usecs > 0xffff ||
+ ethcoal->tx_max_coalesced_frames > 0xffff ||
+ ethcoal->rx_coalesce_usecs_irq ||
+ ethcoal->rx_max_coalesced_frames_irq ||
+ ethcoal->tx_coalesce_usecs_irq ||
+ ethcoal->tx_max_coalesced_frames_irq ||
+ ethcoal->stats_block_coalesce_usecs ||
+ ethcoal->use_adaptive_rx_coalesce ||
+ ethcoal->use_adaptive_tx_coalesce ||
+ ethcoal->pkt_rate_low ||
+ ethcoal->rx_coalesce_usecs_low ||
+ ethcoal->rx_max_coalesced_frames_low ||
+ ethcoal->tx_coalesce_usecs_low ||
+ ethcoal->tx_max_coalesced_frames_low ||
+ ethcoal->pkt_rate_high ||
+ ethcoal->rx_coalesce_usecs_high ||
+ ethcoal->rx_max_coalesced_frames_high ||
+ ethcoal->tx_coalesce_usecs_high ||
+ ethcoal->tx_max_coalesced_frames_high)
return -EINVAL;
- if (!ethcoal->rx_coalesce_usecs ||
- !ethcoal->rx_max_coalesced_frames) {
- adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
- adapter->ahw->coal.rx_time_us =
- QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
- adapter->ahw->coal.rx_packets =
- QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
- } else {
- adapter->ahw->coal.flag = 0;
- adapter->ahw->coal.rx_time_us = ethcoal->rx_coalesce_usecs;
- adapter->ahw->coal.rx_packets =
- ethcoal->rx_max_coalesced_frames;
- }
-
- qlcnic_config_intr_coalesce(adapter);
+ err = qlcnic_config_intr_coalesce(adapter, ethcoal);
- return 0;
+ return err;
}
static int qlcnic_get_intr_coalesce(struct net_device *netdev,
@@ -1201,6 +1556,8 @@ static int qlcnic_get_intr_coalesce(struct net_device *netdev,
ethcoal->rx_coalesce_usecs = adapter->ahw->coal.rx_time_us;
ethcoal->rx_max_coalesced_frames = adapter->ahw->coal.rx_packets;
+ ethcoal->tx_coalesce_usecs = adapter->ahw->coal.tx_time_us;
+ ethcoal->tx_max_coalesced_frames = adapter->ahw->coal.tx_packets;
return 0;
}
@@ -1219,6 +1576,68 @@ static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl)
adapter->ahw->msg_enable = msglvl;
}
+int qlcnic_enable_fw_dump_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ u32 val;
+
+ if (qlcnic_84xx_check(adapter)) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val &= ~QLC_83XX_IDC_DISABLE_FW_DUMP;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+
+ qlcnic_83xx_unlock_driver(adapter);
+ } else {
+ fw_dump->enable = true;
+ }
+
+ dev_info(&adapter->pdev->dev, "FW dump enabled\n");
+
+ return 0;
+}
+
+static int qlcnic_disable_fw_dump_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ u32 val;
+
+ if (qlcnic_84xx_check(adapter)) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val |= QLC_83XX_IDC_DISABLE_FW_DUMP;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+
+ qlcnic_83xx_unlock_driver(adapter);
+ } else {
+ fw_dump->enable = false;
+ }
+
+ dev_info(&adapter->pdev->dev, "FW dump disabled\n");
+
+ return 0;
+}
+
+bool qlcnic_check_fw_dump_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ bool state;
+ u32 val;
+
+ if (qlcnic_84xx_check(adapter)) {
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ state = (val & QLC_83XX_IDC_DISABLE_FW_DUMP) ? false : true;
+ } else {
+ state = fw_dump->enable;
+ }
+
+ return state;
+}
+
static int
qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
{
@@ -1231,14 +1650,14 @@ qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
}
if (fw_dump->clr)
- dump->len = fw_dump->tmpl_hdr->size + fw_dump->size;
+ dump->len = fw_dump->tmpl_hdr_size + fw_dump->size;
else
dump->len = 0;
- if (!fw_dump->enable)
+ if (!qlcnic_check_fw_dump_state(adapter))
dump->flag = ETH_FW_DUMP_DISABLE;
else
- dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
+ dump->flag = fw_dump->cap_mask;
dump->version = adapter->fw_version;
return 0;
@@ -1263,9 +1682,10 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
netdev_info(netdev, "Dump not available\n");
return -EINVAL;
}
+
/* Copy template header first */
- copy_sz = fw_dump->tmpl_hdr->size;
- hdr_ptr = (u32 *) fw_dump->tmpl_hdr;
+ copy_sz = fw_dump->tmpl_hdr_size;
+ hdr_ptr = (u32 *)fw_dump->tmpl_hdr;
data = buffer;
for (i = 0; i < copy_sz/sizeof(u32); i++)
*data++ = cpu_to_le32(*hdr_ptr++);
@@ -1273,7 +1693,7 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
/* Copy captured dump data */
memcpy(buffer + copy_sz, fw_dump->data, fw_dump->size);
dump->len = copy_sz + fw_dump->size;
- dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
+ dump->flag = fw_dump->cap_mask;
/* Free dump area once data has been captured */
vfree(fw_dump->data);
@@ -1283,77 +1703,113 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
return 0;
}
+static int qlcnic_set_dump_mask(struct qlcnic_adapter *adapter, u32 mask)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ struct net_device *netdev = adapter->netdev;
+
+ if (!qlcnic_check_fw_dump_state(adapter)) {
+ netdev_info(netdev,
+ "Can not change driver mask to 0x%x. FW dump not enabled\n",
+ mask);
+ return -EOPNOTSUPP;
+ }
+
+ fw_dump->cap_mask = mask;
+
+ /* Store new capture mask in template header as well */
+ qlcnic_store_cap_mask(adapter, fw_dump->tmpl_hdr, mask);
+
+ netdev_info(netdev, "Driver mask changed to: 0x%x\n", mask);
+ return 0;
+}
+
static int
qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
{
- int i;
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
- u32 state;
+ bool valid_mask = false;
+ int i, ret = 0;
switch (val->flag) {
case QLCNIC_FORCE_FW_DUMP_KEY:
if (!fw_dump->tmpl_hdr) {
netdev_err(netdev, "FW dump not supported\n");
- return -ENOTSUPP;
+ ret = -EOPNOTSUPP;
+ break;
}
- if (!fw_dump->enable) {
+
+ if (!qlcnic_check_fw_dump_state(adapter)) {
netdev_info(netdev, "FW dump not enabled\n");
- return 0;
+ ret = -EOPNOTSUPP;
+ break;
}
+
if (fw_dump->clr) {
netdev_info(netdev,
- "Previous dump not cleared, not forcing dump\n");
- return 0;
+ "Previous dump not cleared, not forcing dump\n");
+ break;
}
+
netdev_info(netdev, "Forcing a FW dump\n");
- qlcnic_dev_request_reset(adapter);
+ qlcnic_dev_request_reset(adapter, val->flag);
break;
case QLCNIC_DISABLE_FW_DUMP:
- if (fw_dump->enable && fw_dump->tmpl_hdr) {
- netdev_info(netdev, "Disabling FW dump\n");
- fw_dump->enable = 0;
+ if (!fw_dump->tmpl_hdr) {
+ netdev_err(netdev, "FW dump not supported\n");
+ ret = -EOPNOTSUPP;
+ break;
}
- return 0;
+
+ ret = qlcnic_disable_fw_dump_state(adapter);
+ break;
+
case QLCNIC_ENABLE_FW_DUMP:
if (!fw_dump->tmpl_hdr) {
netdev_err(netdev, "FW dump not supported\n");
- return -ENOTSUPP;
- }
- if (!fw_dump->enable) {
- netdev_info(netdev, "Enabling FW dump\n");
- fw_dump->enable = 1;
+ ret = -EOPNOTSUPP;
+ break;
}
- return 0;
+
+ ret = qlcnic_enable_fw_dump_state(adapter);
+ break;
+
case QLCNIC_FORCE_FW_RESET:
netdev_info(netdev, "Forcing a FW reset\n");
- qlcnic_dev_request_reset(adapter);
+ qlcnic_dev_request_reset(adapter, val->flag);
adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
- return 0;
+ break;
+
case QLCNIC_SET_QUIESCENT:
case QLCNIC_RESET_QUIESCENT:
- state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
- netdev_info(netdev, "Device in FAILED state\n");
- return 0;
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state))
+ netdev_info(netdev, "Device is in non-operational state\n");
+ break;
+
default:
if (!fw_dump->tmpl_hdr) {
netdev_err(netdev, "FW dump not supported\n");
- return -ENOTSUPP;
+ ret = -EOPNOTSUPP;
+ break;
}
- for (i = 0; i < ARRAY_SIZE(FW_DUMP_LEVELS); i++) {
- if (val->flag == FW_DUMP_LEVELS[i]) {
- fw_dump->tmpl_hdr->drv_cap_mask =
- val->flag;
- netdev_info(netdev, "Driver mask changed to: 0x%x\n",
- fw_dump->tmpl_hdr->drv_cap_mask);
- return 0;
+
+ for (i = 0; i < ARRAY_SIZE(qlcnic_fw_dump_level); i++) {
+ if (val->flag == qlcnic_fw_dump_level[i]) {
+ valid_mask = true;
+ break;
}
}
- netdev_info(netdev, "Invalid dump level: 0x%x\n", val->flag);
- return -EINVAL;
+
+ if (valid_mask) {
+ ret = qlcnic_set_dump_mask(adapter, val->flag);
+ } else {
+ netdev_info(netdev, "Invalid dump level: 0x%x\n",
+ val->flag);
+ ret = -EINVAL;
+ }
}
- return 0;
+ return ret;
}
const struct ethtool_ops qlcnic_ethtool_ops = {
@@ -1387,9 +1843,32 @@ const struct ethtool_ops qlcnic_ethtool_ops = {
.set_dump = qlcnic_set_dump,
};
+const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops = {
+ .get_settings = qlcnic_get_settings,
+ .get_drvinfo = qlcnic_get_drvinfo,
+ .get_regs_len = qlcnic_get_regs_len,
+ .get_regs = qlcnic_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = qlcnic_get_eeprom_len,
+ .get_eeprom = qlcnic_get_eeprom,
+ .get_ringparam = qlcnic_get_ringparam,
+ .set_ringparam = qlcnic_set_ringparam,
+ .get_channels = qlcnic_get_channels,
+ .get_pauseparam = qlcnic_get_pauseparam,
+ .get_wol = qlcnic_get_wol,
+ .get_strings = qlcnic_get_strings,
+ .get_ethtool_stats = qlcnic_get_ethtool_stats,
+ .get_sset_count = qlcnic_get_sset_count,
+ .get_coalesce = qlcnic_get_intr_coalesce,
+ .set_coalesce = qlcnic_set_intr_coalesce,
+ .set_msglevel = qlcnic_set_msglevel,
+ .get_msglevel = qlcnic_get_msglevel,
+};
+
const struct ethtool_ops qlcnic_ethtool_failed_ops = {
- .get_settings = qlcnic_get_settings,
- .get_drvinfo = qlcnic_get_drvinfo,
- .set_msglevel = qlcnic_set_msglevel,
- .get_msglevel = qlcnic_get_msglevel,
+ .get_settings = qlcnic_get_settings,
+ .get_drvinfo = qlcnic_get_drvinfo,
+ .set_msglevel = qlcnic_set_msglevel,
+ .get_msglevel = qlcnic_get_msglevel,
+ .set_dump = qlcnic_set_dump,
};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
index 49cc1ac4f05..34e467b239a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -1,6 +1,6 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
@@ -11,6 +11,8 @@
#include <linux/kernel.h>
#include <linux/types.h>
+#include "qlcnic_hw.h"
+
/*
* The basic unit of access when reading/writing control registers.
*/
@@ -387,9 +389,6 @@ enum {
#define QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
#define QLCNIC_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018)
-/* Lock IDs for ROM lock */
-#define ROM_LOCK_DRIVER 0x0d417340
-
/******************************************************************************
*
* Definitions specific to M25P flash
@@ -449,13 +448,10 @@ enum {
#define ISR_INT_TARGET_STATUS_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
#define ISR_INT_TARGET_MASK_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
-#define QLCNIC_PCI_MN_2M (0)
-#define QLCNIC_PCI_MS_2M (0x80000)
#define QLCNIC_PCI_OCM0_2M (0x000c0000UL)
#define QLCNIC_PCI_CRBSPACE (0x06000000UL)
#define QLCNIC_PCI_CAMQM (0x04800000UL)
#define QLCNIC_PCI_CAMQM_END (0x04800800UL)
-#define QLCNIC_PCI_2MB_SIZE (0x00200000UL)
#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL)
#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM)
@@ -491,7 +487,7 @@ enum {
#define QLCNIC_NIU_GB_MAC_CONFIG_1(I) \
(QLCNIC_CRB_NIU + 0x30004 + (I)*0x10000)
-
+#define MAX_CTL_CHECK 1000
#define TEST_AGT_CTRL (0x00)
#define TA_CTL_START BIT_0
@@ -499,44 +495,6 @@ enum {
#define TA_CTL_WRITE BIT_2
#define TA_CTL_BUSY BIT_3
-/*
- * Register offsets for MN
- */
-#define MIU_TEST_AGT_BASE (0x90)
-
-#define MIU_TEST_AGT_ADDR_LO (0x04)
-#define MIU_TEST_AGT_ADDR_HI (0x08)
-#define MIU_TEST_AGT_WRDATA_LO (0x10)
-#define MIU_TEST_AGT_WRDATA_HI (0x14)
-#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x20)
-#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x24)
-#define MIU_TEST_AGT_WRDATA(i) (0x10+(0x10*((i)>>1))+(4*((i)&1)))
-#define MIU_TEST_AGT_RDDATA_LO (0x18)
-#define MIU_TEST_AGT_RDDATA_HI (0x1c)
-#define MIU_TEST_AGT_RDDATA_UPPER_LO (0x28)
-#define MIU_TEST_AGT_RDDATA_UPPER_HI (0x2c)
-#define MIU_TEST_AGT_RDDATA(i) (0x18+(0x10*((i)>>1))+(4*((i)&1)))
-
-#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
-#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
-
-/*
- * Register offsets for MS
- */
-#define SIU_TEST_AGT_BASE (0x60)
-
-#define SIU_TEST_AGT_ADDR_LO (0x04)
-#define SIU_TEST_AGT_ADDR_HI (0x18)
-#define SIU_TEST_AGT_WRDATA_LO (0x08)
-#define SIU_TEST_AGT_WRDATA_HI (0x0c)
-#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i)))
-#define SIU_TEST_AGT_RDDATA_LO (0x10)
-#define SIU_TEST_AGT_RDDATA_HI (0x14)
-#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i)))
-
-#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8
-#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22)
-
/* XG Link status */
#define XG_LINK_UP 0x10
#define XG_LINK_DOWN 0x20
@@ -556,9 +514,6 @@ enum {
#define QLCNIC_CAM_RAM_BASE (QLCNIC_CRB_CAM + 0x02000)
#define QLCNIC_CAM_RAM(reg) (QLCNIC_CAM_RAM_BASE + (reg))
-#define QLCNIC_FW_VERSION_MAJOR (QLCNIC_CAM_RAM(0x150))
-#define QLCNIC_FW_VERSION_MINOR (QLCNIC_CAM_RAM(0x154))
-#define QLCNIC_FW_VERSION_SUB (QLCNIC_CAM_RAM(0x158))
#define QLCNIC_ROM_LOCK_ID (QLCNIC_CAM_RAM(0x100))
#define QLCNIC_PHY_LOCK_ID (QLCNIC_CAM_RAM(0x120))
#define QLCNIC_CRB_WIN_LOCK_ID (QLCNIC_CAM_RAM(0x124))
@@ -568,28 +523,17 @@ enum {
#define QLCNIC_REG(X) (NIC_CRB_BASE+(X))
#define QLCNIC_REG_2(X) (NIC_CRB_BASE_2+(X))
+#define QLCNIC_CDRP_MAX_ARGS 4
+#define QLCNIC_CDRP_ARG(i) (QLCNIC_REG(0x18 + ((i) * 4)))
+
#define QLCNIC_CDRP_CRB_OFFSET (QLCNIC_REG(0x18))
-#define QLCNIC_ARG1_CRB_OFFSET (QLCNIC_REG(0x1c))
-#define QLCNIC_ARG2_CRB_OFFSET (QLCNIC_REG(0x20))
-#define QLCNIC_ARG3_CRB_OFFSET (QLCNIC_REG(0x24))
#define QLCNIC_SIGN_CRB_OFFSET (QLCNIC_REG(0x28))
-#define CRB_CMDPEG_STATE (QLCNIC_REG(0x50))
-#define CRB_RCVPEG_STATE (QLCNIC_REG(0x13c))
-
#define CRB_XG_STATE_P3P (QLCNIC_REG(0x98))
#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8))
-#define CRB_PF_LINK_SPEED_2 (QLCNIC_REG(0xec))
-
-#define CRB_TEMP_STATE (QLCNIC_REG(0x1b4))
-
-#define CRB_V2P_0 (QLCNIC_REG(0x290))
-#define CRB_V2P(port) (CRB_V2P_0+((port)*4))
#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0))
-#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128))
#define CRB_FW_CAPABILITIES_2 (QLCNIC_CAM_RAM(0x12c))
-#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0))
/*
* CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address
@@ -616,11 +560,6 @@ enum {
/* Lock IDs for PHY lock */
#define PHY_LOCK_DRIVER 0x44524956
-/* Used for PS PCI Memory access */
-#define PCIX_PS_OP_ADDR_LO (0x10000)
-/* via CRB (PS side only) */
-#define PCIX_PS_OP_ADDR_HI (0x10004)
-
#define PCIX_INT_VECTOR (0x10100)
#define PCIX_INT_MASK (0x10104)
@@ -682,17 +621,6 @@ enum {
#define QLCNIC_PEG_TUNE_CAPABILITY (QLCNIC_CAM_RAM(0x02c))
#define QLCNIC_DMA_WATCHDOG_CTRL (QLCNIC_CAM_RAM(0x14))
-#define QLCNIC_PEG_ALIVE_COUNTER (QLCNIC_CAM_RAM(0xb0))
-#define QLCNIC_PEG_HALT_STATUS1 (QLCNIC_CAM_RAM(0xa8))
-#define QLCNIC_PEG_HALT_STATUS2 (QLCNIC_CAM_RAM(0xac))
-#define QLCNIC_CRB_DRV_ACTIVE (QLCNIC_CAM_RAM(0x138))
-#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140))
-
-#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144))
-#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148))
-#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c))
-#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x174))
-#define QLCNIC_CRB_DEV_NPAR_STATE (QLCNIC_CAM_RAM(0x19c))
#define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c)
#define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860)
@@ -711,7 +639,6 @@ enum {
#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */
#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */
-#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) & (1 << (FN * 4)))
#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4)))
#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4)))
#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4)))
@@ -742,7 +669,11 @@ enum {
#define QLCNIC_CMDPEG_CHECK_RETRY_COUNT 60
#define QLCNIC_CMDPEG_CHECK_DELAY 500
#define QLCNIC_HEARTBEAT_PERIOD_MSECS 200
-#define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT 45
+#define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT 10
+
+#define QLCNIC_MAX_MC_COUNT 38
+#define QLCNIC_MAX_UC_COUNT 512
+#define QLCNIC_WATCHDOG_TIMEOUTVALUE 5
#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
@@ -766,26 +697,12 @@ struct qlcnic_legacy_intr_set {
u32 pci_int_reg;
};
-#define QLCNIC_FW_API 0x1b216c
-#define QLCNIC_DRV_OP_MODE 0x1b2170
#define QLCNIC_MSIX_BASE 0x132110
-#define QLCNIC_MAX_PCI_FUNC 8
#define QLCNIC_MAX_VLAN_FILTERS 64
-/* FW dump defines */
-#define MIU_TEST_CTR 0x41000090
-#define MIU_TEST_ADDR_LO 0x41000094
-#define MIU_TEST_ADDR_HI 0x41000098
#define FLASH_ROM_WINDOW 0x42110030
#define FLASH_ROM_DATA 0x42150000
-
-static const u32 FW_DUMP_LEVELS[] = {
- 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff };
-
-static const u32 MIU_TEST_READ_DATA[] = {
- 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC, };
-
#define QLCNIC_FW_DUMP_REG1 0x00130060
#define QLCNIC_FW_DUMP_REG2 0x001e0000
#define QLCNIC_FLASH_SEM2_LK 0x0013C010
@@ -796,7 +713,10 @@ static const u32 MIU_TEST_READ_DATA[] = {
enum {
QLCNIC_MGMT_FUNC = 0,
QLCNIC_PRIV_FUNC = 1,
- QLCNIC_NON_PRIV_FUNC = 2
+ QLCNIC_NON_PRIV_FUNC = 2,
+ QLCNIC_SRIOV_PF_FUNC = 3,
+ QLCNIC_SRIOV_VF_FUNC = 4,
+ QLCNIC_UNKNOWN_FUNC_MODE = 5
};
enum {
@@ -1013,6 +933,8 @@ enum {
#define QLCNIC_NIU_PROMISC_MODE 1
#define QLCNIC_NIU_ALLMULTI_MODE 2
+#define QLCNIC_PCIE_SEM_TIMEOUT 10000
+
struct crb_128M_2M_sub_block_map {
unsigned valid;
unsigned start_128M;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 7a6d5ebe4e0..851cb4a80d5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -1,6 +1,6 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
@@ -317,16 +317,28 @@ static void qlcnic_write_window_reg(u32 addr, void __iomem *bar0, u32 data)
int
qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
{
- int done = 0, timeout = 0;
+ int timeout = 0, err = 0, done = 0;
while (!done) {
- done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
+ done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)),
+ &err);
if (done == 1)
break;
if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
- dev_err(&adapter->pdev->dev,
- "Failed to acquire sem=%d lock; holdby=%d\n",
- sem, id_reg ? QLCRD32(adapter, id_reg) : -1);
+ if (id_reg) {
+ done = QLCRD32(adapter, id_reg, &err);
+ if (done != -1)
+ dev_err(&adapter->pdev->dev,
+ "Failed to acquire sem=%d lock held by=%d\n",
+ sem, done);
+ else
+ dev_err(&adapter->pdev->dev,
+ "Failed to acquire sem=%d lock",
+ sem);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "Failed to acquire sem=%d lock", sem);
+ }
return -EIO;
}
msleep(1);
@@ -341,24 +353,36 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
void
qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
{
- QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
+ int err = 0;
+
+ QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)), &err);
}
-static int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr)
+int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr)
{
+ int err = 0;
u32 data;
if (qlcnic_82xx_check(adapter))
qlcnic_read_window_reg(addr, adapter->ahw->pci_base0, &data);
- else
- return -EIO;
+ else {
+ data = QLCRD32(adapter, addr, &err);
+ if (err == -EIO)
+ return err;
+ }
return data;
}
-static void qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
+int qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
{
+ int ret = 0;
+
if (qlcnic_82xx_check(adapter))
qlcnic_write_window_reg(addr, adapter->ahw->pci_base0, data);
+ else
+ ret = qlcnic_83xx_wrt_reg_indirect(adapter, addr, data);
+
+ return ret;
}
static int
@@ -375,7 +399,7 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
return -EIO;
- tx_ring = adapter->tx_ring;
+ tx_ring = &adapter->tx_ring[0];
__netif_tx_lock_bh(tx_ring->txq);
producer = tx_ring->producer;
@@ -417,9 +441,8 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
return 0;
}
-static int
-qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
- __le16 vlan_id, unsigned op)
+int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
+ u16 vlan_id, u8 op)
{
struct qlcnic_nic_req req;
struct qlcnic_mac_req *mac_req;
@@ -434,47 +457,70 @@ qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
mac_req = (struct qlcnic_mac_req *)&req.words[0];
mac_req->op = op;
- memcpy(mac_req->mac_addr, addr, 6);
+ memcpy(mac_req->mac_addr, addr, ETH_ALEN);
vlan_req = (struct qlcnic_vlan_req *)&req.words[1];
- vlan_req->vlan_id = vlan_id;
+ vlan_req->vlan_id = cpu_to_le16(vlan_id);
return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
}
-static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
+int qlcnic_nic_del_mac(struct qlcnic_adapter *adapter, const u8 *addr)
+{
+ struct qlcnic_mac_vlan_list *cur;
+ struct list_head *head;
+ int err = -EINVAL;
+
+ /* Delete MAC from the existing list */
+ list_for_each(head, &adapter->mac_list) {
+ cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ if (ether_addr_equal(addr, cur->mac_addr)) {
+ err = qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
+ 0, QLCNIC_MAC_DEL);
+ if (err)
+ return err;
+ list_del(&cur->list);
+ kfree(cur);
+ return err;
+ }
+ }
+ return err;
+}
+
+int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan)
{
+ struct qlcnic_mac_vlan_list *cur;
struct list_head *head;
- struct qlcnic_mac_list_s *cur;
/* look up if already exists */
list_for_each(head, &adapter->mac_list) {
- cur = list_entry(head, struct qlcnic_mac_list_s, list);
- if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0)
+ cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ if (ether_addr_equal(addr, cur->mac_addr) &&
+ cur->vlan_id == vlan)
return 0;
}
- cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
- if (cur == NULL) {
- dev_err(&adapter->netdev->dev,
- "failed to add mac address filter\n");
+ cur = kzalloc(sizeof(*cur), GFP_ATOMIC);
+ if (cur == NULL)
return -ENOMEM;
- }
+
memcpy(cur->mac_addr, addr, ETH_ALEN);
if (qlcnic_sre_macaddr_change(adapter,
- cur->mac_addr, 0, QLCNIC_MAC_ADD)) {
+ cur->mac_addr, vlan, QLCNIC_MAC_ADD)) {
kfree(cur);
return -EIO;
}
+ cur->vlan_id = vlan;
list_add_tail(&cur->list, &adapter->mac_list);
return 0;
}
-void qlcnic_set_multi(struct net_device *netdev)
+static void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
struct netdev_hw_addr *ha;
static const u8 bcast_addr[ETH_ALEN] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
@@ -484,39 +530,58 @@ void qlcnic_set_multi(struct net_device *netdev)
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
return;
- qlcnic_nic_add_mac(adapter, adapter->mac_addr);
- qlcnic_nic_add_mac(adapter, bcast_addr);
+ qlcnic_nic_add_mac(adapter, adapter->mac_addr, vlan);
+ qlcnic_nic_add_mac(adapter, bcast_addr, vlan);
if (netdev->flags & IFF_PROMISC) {
if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
mode = VPORT_MISS_MODE_ACCEPT_ALL;
- goto send_fw_cmd;
- }
-
- if ((netdev->flags & IFF_ALLMULTI) ||
- (netdev_mc_count(netdev) > adapter->ahw->max_mc_count)) {
+ } else if ((netdev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(netdev) > ahw->max_mc_count)) {
mode = VPORT_MISS_MODE_ACCEPT_MULTI;
- goto send_fw_cmd;
+ } else if (!netdev_mc_empty(netdev)) {
+ netdev_for_each_mc_addr(ha, netdev)
+ qlcnic_nic_add_mac(adapter, ha->addr, vlan);
}
- if (!netdev_mc_empty(netdev)) {
- netdev_for_each_mc_addr(ha, netdev) {
- qlcnic_nic_add_mac(adapter, ha->addr);
- }
+ /* Configure unicast MAC addresses; if there is not sufficient space
+ * to store all of them, enable promiscuous mode.
+ */
+ if (netdev_uc_count(netdev) > ahw->max_uc_count) {
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ } else if (!netdev_uc_empty(netdev)) {
+ netdev_for_each_uc_addr(ha, netdev)
+ qlcnic_nic_add_mac(adapter, ha->addr, vlan);
}
-send_fw_cmd:
- if (mode == VPORT_MISS_MODE_ACCEPT_ALL) {
+ if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
+ !adapter->fdb_mac_learn) {
qlcnic_alloc_lb_filters_mem(adapter);
- adapter->mac_learn = 1;
+ adapter->drv_mac_learn = 1;
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+ adapter->rx_mac_learn = true;
} else {
- adapter->mac_learn = 0;
+ adapter->drv_mac_learn = 0;
+ adapter->rx_mac_learn = false;
}
qlcnic_nic_set_promisc(adapter, mode);
}
-int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
+void qlcnic_set_multi(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+ return;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ qlcnic_sriov_vf_set_multi(netdev);
+ else
+ __qlcnic_set_multi(netdev, 0);
+}
+
+int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
{
struct qlcnic_nic_req req;
u64 word;
@@ -535,13 +600,13 @@ int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
(struct cmd_desc_type0 *)&req, 1);
}
-void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
+void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter)
{
- struct qlcnic_mac_list_s *cur;
struct list_head *head = &adapter->mac_list;
+ struct qlcnic_mac_vlan_list *cur;
while (!list_empty(head)) {
- cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
+ cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
qlcnic_sre_macaddr_change(adapter,
cur->mac_addr, 0, QLCNIC_MAC_DEL);
list_del(&cur->list);
@@ -552,21 +617,23 @@ void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
{
struct qlcnic_filter *tmp_fil;
- struct hlist_node *tmp_hnode, *n;
+ struct hlist_node *n;
struct hlist_head *head;
int i;
+ unsigned long expires;
+ u8 cmd;
- for (i = 0; i < adapter->fhash.fmax; i++) {
+ for (i = 0; i < adapter->fhash.fbucket_size; i++) {
head = &(adapter->fhash.fhead[i]);
-
- hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode)
- {
- if (jiffies >
- (QLCNIC_FILTER_AGE * HZ + tmp_fil->ftime)) {
+ hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
+ cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
+ QLCNIC_MAC_DEL;
+ expires = tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ;
+ if (time_before(expires, jiffies)) {
qlcnic_sre_macaddr_change(adapter,
- tmp_fil->faddr, tmp_fil->vlan_id,
- tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
- QLCNIC_MAC_DEL);
+ tmp_fil->faddr,
+ tmp_fil->vlan_id,
+ cmd);
spin_lock_bh(&adapter->mac_learn_lock);
adapter->fhash.fnum--;
hlist_del(&tmp_fil->fnode);
@@ -575,22 +642,40 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
}
}
}
+ for (i = 0; i < adapter->rx_fhash.fbucket_size; i++) {
+ head = &(adapter->rx_fhash.fhead[i]);
+
+ hlist_for_each_entry_safe(tmp_fil, n, head, fnode)
+ {
+ expires = tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ;
+ if (time_before(expires, jiffies)) {
+ spin_lock_bh(&adapter->rx_mac_learn_lock);
+ adapter->rx_fhash.fnum--;
+ hlist_del(&tmp_fil->fnode);
+ spin_unlock_bh(&adapter->rx_mac_learn_lock);
+ kfree(tmp_fil);
+ }
+ }
+ }
}
void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
{
struct qlcnic_filter *tmp_fil;
- struct hlist_node *tmp_hnode, *n;
+ struct hlist_node *n;
struct hlist_head *head;
int i;
+ u8 cmd;
- for (i = 0; i < adapter->fhash.fmax; i++) {
+ for (i = 0; i < adapter->fhash.fbucket_size; i++) {
head = &(adapter->fhash.fhead[i]);
-
- hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
- qlcnic_sre_macaddr_change(adapter, tmp_fil->faddr,
- tmp_fil->vlan_id, tmp_fil->vlan_id ?
- QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL);
+ hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
+ cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
+ QLCNIC_MAC_DEL;
+ qlcnic_sre_macaddr_change(adapter,
+ tmp_fil->faddr,
+ tmp_fil->vlan_id,
+ cmd);
spin_lock_bh(&adapter->mac_learn_lock);
adapter->fhash.fnum--;
hlist_del(&tmp_fil->fnode);
@@ -620,12 +705,13 @@ static int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag)
return rv;
}
-int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
{
if (qlcnic_set_fw_loopback(adapter, mode))
return -EIO;
- if (qlcnic_nic_set_promisc(adapter, VPORT_MISS_MODE_ACCEPT_ALL)) {
+ if (qlcnic_nic_set_promisc(adapter,
+ VPORT_MISS_MODE_ACCEPT_ALL)) {
qlcnic_set_fw_loopback(adapter, 0);
return -EIO;
}
@@ -634,11 +720,11 @@ int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
return 0;
}
-void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter)
+int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
{
- int mode = VPORT_MISS_MODE_DROP;
struct net_device *netdev = adapter->netdev;
+ mode = VPORT_MISS_MODE_DROP;
qlcnic_set_fw_loopback(adapter, 0);
if (netdev->flags & IFF_PROMISC)
@@ -648,12 +734,26 @@ void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter)
qlcnic_nic_set_promisc(adapter, mode);
msleep(1000);
+ return 0;
}
-/*
- * Send the interrupt coalescing parameter set by ethtool to the card.
- */
-int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
+int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *adapter)
+{
+ u8 mac[ETH_ALEN];
+ int ret;
+
+ ret = qlcnic_get_mac_address(adapter, mac,
+ adapter->ahw->physical_port);
+ if (ret)
+ return ret;
+
+ memcpy(adapter->ahw->phys_port_id, mac, ETH_ALEN);
+ adapter->flags |= QLCNIC_HAS_PHYS_PORT_ID;
+
+ return 0;
+}
+
+int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *adapter)
{
struct qlcnic_nic_req req;
int rv;
@@ -675,10 +775,34 @@ int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
if (rv != 0)
dev_err(&adapter->netdev->dev,
"Could not send interrupt coalescing parameters\n");
+
return rv;
}
-int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
+/* Send the interrupt coalescing parameter set by ethtool to the card. */
+int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+ int rv;
+
+ coal->flag = QLCNIC_INTR_DEFAULT;
+ coal->rx_time_us = ethcoal->rx_coalesce_usecs;
+ coal->rx_packets = ethcoal->rx_max_coalesced_frames;
+
+ rv = qlcnic_82xx_set_rx_coalesce(adapter);
+
+ if (rv)
+ netdev_err(adapter->netdev,
+ "Failed to set Rx coalescing parametrs\n");
+
+ return rv;
+}
+
+#define QLCNIC_ENABLE_IPV4_LRO BIT_0
+#define QLCNIC_ENABLE_IPV6_LRO (BIT_1 | BIT_9)
+
+int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
{
struct qlcnic_nic_req req;
u64 word;
@@ -694,7 +818,15 @@ int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
req.req_hdr = cpu_to_le64(word);
- req.words[0] = cpu_to_le64(enable);
+ word = 0;
+ if (enable) {
+ word = QLCNIC_ENABLE_IPV4_LRO;
+ if (adapter->ahw->extra_capability[0] &
+ QLCNIC_FW_CAP2_HW_LRO_IPV6)
+ word |= QLCNIC_ENABLE_IPV6_LRO;
+ }
+
+ req.words[0] = cpu_to_le64(word);
rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
if (rv != 0)
@@ -734,9 +866,12 @@ int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
}
-#define RSS_HASHTYPE_IP_TCP 0x3
+#define QLCNIC_RSS_HASHTYPE_IP_TCP 0x3
+#define QLCNIC_ENABLE_TYPE_C_RSS BIT_10
+#define QLCNIC_RSS_FEATURE_FLAG (1ULL << 63)
+#define QLCNIC_RSS_IND_TABLE_MASK 0x7ULL
-int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
+int qlcnic_82xx_config_rss(struct qlcnic_adapter *adapter, int enable)
{
struct qlcnic_nic_req req;
u64 word;
@@ -761,13 +896,19 @@ int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
* 7-6: hash_type_ipv6
* 8: enable
* 9: use indirection table
- * 47-10: reserved
- * 63-48: indirection table mask
+ * 10: type-c rss
+ * 11: udp rss
+ * 47-12: reserved
+ * 62-48: indirection table mask
+ * 63: feature flag
*/
- word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
- ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
+ word = ((u64)(QLCNIC_RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
+ ((u64)(QLCNIC_RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
((u64)(enable & 0x1) << 8) |
- ((0x7ULL) << 48);
+ ((u64)QLCNIC_RSS_IND_TABLE_MASK << 48) |
+ (u64)QLCNIC_ENABLE_TYPE_C_RSS |
+ (u64)QLCNIC_RSS_FEATURE_FLAG;
+
req.words[0] = cpu_to_le64(word);
for (i = 0; i < 5; i++)
req.words[i+1] = cpu_to_le64(key[i]);
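For reference, a worked example of the 64-bit RSS control word built above (illustrative: enable = 1, bit positions as documented in the layout comment):
	/*
	 *   hash_type_ipv4  (bits 5:4)   = 0x3        -> 0x0000000000000030
	 *   hash_type_ipv6  (bits 7:6)   = 0x3        -> 0x00000000000000c0
	 *   enable          (bit 8)      = 1          -> 0x0000000000000100
	 *   type-c RSS      (bit 10)     = BIT_10     -> 0x0000000000000400
	 *   ind. table mask (bits 62:48) = 0x7ULL<<48 -> 0x0007000000000000
	 *   feature flag    (bit 63)     = 1ULL<<63   -> 0x8000000000000000
	 *   word (all of the above ORed together)     =  0x80070000000005f0
	 */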
@@ -779,7 +920,8 @@ int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
return rv;
}
-int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd)
+void qlcnic_82xx_config_ipaddr(struct qlcnic_adapter *adapter,
+ __be32 ip, int cmd)
{
struct qlcnic_nic_req req;
struct qlcnic_ipaddr *ipa;
@@ -801,23 +943,19 @@ int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd)
dev_err(&adapter->netdev->dev,
"could not notify %s IP 0x%x reuqest\n",
(cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
-
- return rv;
}
-int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable)
+int qlcnic_82xx_linkevent_request(struct qlcnic_adapter *adapter, int enable)
{
struct qlcnic_nic_req req;
u64 word;
int rv;
-
memset(&req, 0, sizeof(struct qlcnic_nic_req));
req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
req.req_hdr = cpu_to_le64(word);
req.words[0] = cpu_to_le64(enable | (enable << 8));
-
rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
if (rv != 0)
dev_err(&adapter->netdev->dev,
@@ -826,7 +964,7 @@ int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable)
return rv;
}
-int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
+static int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
{
struct qlcnic_nic_req req;
u64 word;
@@ -876,15 +1014,57 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu)
return rc;
}
+static netdev_features_t qlcnic_process_flags(struct qlcnic_adapter *adapter,
+ netdev_features_t features)
+{
+ u32 offload_flags = adapter->offload_flags;
+
+ if (offload_flags & BIT_0) {
+ features |= NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM;
+ adapter->rx_csum = 1;
+ if (QLCNIC_IS_TSO_CAPABLE(adapter)) {
+ if (!(offload_flags & BIT_1))
+ features &= ~NETIF_F_TSO;
+ else
+ features |= NETIF_F_TSO;
+
+ if (!(offload_flags & BIT_2))
+ features &= ~NETIF_F_TSO6;
+ else
+ features |= NETIF_F_TSO6;
+ }
+ } else {
+ features &= ~(NETIF_F_RXCSUM |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM);
+
+ if (QLCNIC_IS_TSO_CAPABLE(adapter))
+ features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+ adapter->rx_csum = 0;
+ }
+
+ return features;
+}
netdev_features_t qlcnic_fix_features(struct net_device *netdev,
netdev_features_t features)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ netdev_features_t changed;
- if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
- netdev_features_t changed = features ^ netdev->features;
- features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
+ if (qlcnic_82xx_check(adapter) &&
+ (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ if (adapter->flags & QLCNIC_APP_CHANGED_FLAGS) {
+ features = qlcnic_process_flags(adapter, features);
+ } else {
+ changed = features ^ netdev->features;
+ features ^= changed & (NETIF_F_RXCSUM |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+ }
}
if (!(features & NETIF_F_RXCSUM))
@@ -903,13 +1083,15 @@ int qlcnic_set_features(struct net_device *netdev, netdev_features_t features)
if (!(changed & NETIF_F_LRO))
return 0;
- netdev->features = features ^ NETIF_F_LRO;
+ netdev->features ^= NETIF_F_LRO;
if (qlcnic_config_hw_lro(adapter, hw_lro))
return -EIO;
- if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter))
- return -EIO;
+ if (!hw_lro && qlcnic_82xx_check(adapter)) {
+ if (qlcnic_send_lro_cleanup(adapter))
+ return -EIO;
+ }
return 0;
}
@@ -981,8 +1163,8 @@ qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
return 0;
}
-int
-qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
+int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off,
+ u32 data)
{
unsigned long flags;
int rv;
@@ -1013,7 +1195,8 @@ qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
return -EIO;
}
-int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
+int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off,
+ int *err)
{
unsigned long flags;
int rv;
@@ -1042,7 +1225,6 @@ int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
return -1;
}
-
void __iomem *qlcnic_get_ioaddr(struct qlcnic_hardware_context *ahw,
u32 offset)
{
@@ -1081,7 +1263,7 @@ static int qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter,
return 0;
}
-void
+static void
qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
{
void __iomem *addr = adapter->ahw->pci_base0 +
@@ -1092,7 +1274,7 @@ qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
mutex_unlock(&adapter->ahw->mem_lock);
}
-void
+static void
qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
{
void __iomem *addr = adapter->ahw->pci_base0 +
@@ -1268,9 +1450,9 @@ int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
return ret;
}
-int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
+int qlcnic_82xx_get_board_info(struct qlcnic_adapter *adapter)
{
- int offset, board_type, magic;
+ int offset, board_type, magic, err = 0;
struct pci_dev *pdev = adapter->pdev;
offset = QLCNIC_FW_MAGIC_OFFSET;
@@ -1290,7 +1472,9 @@ int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
adapter->ahw->board_type = board_type;
if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) {
- u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
+ u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I, &err);
+ if (err == -EIO)
+ return err;
if ((gpio & 0x8000) == 0)
board_type = QLCNIC_BRDTYPE_P3P_10G_TP;
}
@@ -1326,14 +1510,17 @@ int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
return 0;
}
-int
+static int
qlcnic_wol_supported(struct qlcnic_adapter *adapter)
{
u32 wol_cfg;
+ int err = 0;
- wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err);
if (wol_cfg & (1UL << adapter->portnum)) {
- wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err);
+ if (err == -EIO)
+ return err;
if (wol_cfg & (1 << adapter->portnum))
return 1;
}
@@ -1341,7 +1528,7 @@ qlcnic_wol_supported(struct qlcnic_adapter *adapter)
return 0;
}
-int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
+int qlcnic_82xx_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
{
struct qlcnic_nic_req req;
int rv;
@@ -1353,7 +1540,7 @@ int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16);
req.req_hdr = cpu_to_le64(word);
- req.words[0] = cpu_to_le64((u64)rate << 32);
+ req.words[0] = cpu_to_le64(((u64)rate << 32) | adapter->portnum);
req.words[1] = cpu_to_le64(state);
rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
@@ -1362,3 +1549,138 @@ int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
return rv;
}
+
+void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_cmd_args cmd;
+ u8 beacon_state;
+ int err = 0;
+
+ if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) {
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_GET_LED_STATUS);
+ if (!err) {
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ netdev_err(adapter->netdev,
+ "Failed to get current beacon state, err=%d\n",
+ err);
+ } else {
+ beacon_state = cmd.rsp.arg[1];
+ if (beacon_state == QLCNIC_BEACON_DISABLE)
+ ahw->beacon_state = QLCNIC_BEACON_OFF;
+ else if (beacon_state == QLCNIC_BEACON_EANBLE)
+ ahw->beacon_state = QLCNIC_BEACON_ON;
+ }
+ }
+ qlcnic_free_mbx_args(&cmd);
+ }
+
+ return;
+}
+
+void qlcnic_82xx_get_func_no(struct qlcnic_adapter *adapter)
+{
+ void __iomem *msix_base_addr;
+ u32 func;
+ u32 msix_base;
+
+ pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
+ msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE;
+ msix_base = readl(msix_base_addr);
+ func = (func - msix_base) / QLCNIC_MSIX_TBL_PGSIZE;
+ adapter->ahw->pci_func = func;
+}
+
+void qlcnic_82xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ int err = 0;
+ u32 data;
+ u64 qmdata;
+
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+ qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
+ memcpy(buf, &qmdata, size);
+ } else {
+ data = QLCRD32(adapter, offset, &err);
+ memcpy(buf, &data, size);
+ }
+}
+
+void qlcnic_82xx_write_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ u32 data;
+ u64 qmdata;
+
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+ memcpy(&qmdata, buf, size);
+ qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
+ } else {
+ memcpy(&data, buf, size);
+ QLCWR32(adapter, offset, data);
+ }
+}
+
+int qlcnic_82xx_api_lock(struct qlcnic_adapter *adapter)
+{
+ return qlcnic_pcie_sem_lock(adapter, 5, 0);
+}
+
+void qlcnic_82xx_api_unlock(struct qlcnic_adapter *adapter)
+{
+ qlcnic_pcie_sem_unlock(adapter, 5);
+}
+
+int qlcnic_82xx_shutdown(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+ int retval;
+
+ netif_device_detach(netdev);
+
+ qlcnic_cancel_idc_work(adapter);
+
+ if (netif_running(netdev))
+ qlcnic_down(adapter, netdev);
+
+ qlcnic_clr_all_drv_state(adapter, 0);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
+ if (qlcnic_wol_supported(adapter)) {
+ pci_enable_wake(pdev, PCI_D3cold, 1);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ }
+
+ return 0;
+}
+
+int qlcnic_82xx_resume(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ err = qlcnic_start_firmware(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "failed to start firmware\n");
+ return err;
+ }
+
+ if (netif_running(netdev)) {
+ err = qlcnic_up(adapter, netdev);
+ if (!err)
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
+
+ netif_device_attach(netdev);
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
+ return err;
+}
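The wholesale qlcnic_* to qlcnic_82xx_* renames above support dispatching these routines through a per-ASIC ops table shared with the 83xx code. A minimal sketch of that pattern follows, assuming the prototypes from qlcnic_hw.h; the struct and field names are illustrative, not the driver's actual definitions.

/* Illustrative only: an ops table lets common code call ops->config_rss()
 * without knowing whether the adapter is an 82xx or an 83xx device.
 */
struct qlcnic_hw_ops_example {
	int (*config_rss)(struct qlcnic_adapter *, int);
	int (*config_intr_coalesce)(struct qlcnic_adapter *,
				    struct ethtool_coalesce *);
	int (*config_led)(struct qlcnic_adapter *, u32, u32);
	int (*get_board_info)(struct qlcnic_adapter *);
};

static const struct qlcnic_hw_ops_example qlcnic_82xx_ops_example = {
	.config_rss		= qlcnic_82xx_config_rss,
	.config_intr_coalesce	= qlcnic_82xx_config_intr_coalesce,
	.config_led		= qlcnic_82xx_config_led,
	.get_board_info		= qlcnic_82xx_get_board_info,
};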
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
new file mode 100644
index 00000000000..cbe2399c30a
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -0,0 +1,225 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#ifndef __QLCNIC_HW_H
+#define __QLCNIC_HW_H
+
+/* Common registers in 83xx and 82xx */
+enum qlcnic_regs {
+ QLCNIC_PEG_HALT_STATUS1 = 0,
+ QLCNIC_PEG_HALT_STATUS2,
+ QLCNIC_PEG_ALIVE_COUNTER,
+ QLCNIC_FLASH_LOCK_OWNER,
+ QLCNIC_FW_CAPABILITIES,
+ QLCNIC_CRB_DRV_ACTIVE,
+ QLCNIC_CRB_DEV_STATE,
+ QLCNIC_CRB_DRV_STATE,
+ QLCNIC_CRB_DRV_SCRATCH,
+ QLCNIC_CRB_DEV_PARTITION_INFO,
+ QLCNIC_CRB_DRV_IDC_VER,
+ QLCNIC_FW_VERSION_MAJOR,
+ QLCNIC_FW_VERSION_MINOR,
+ QLCNIC_FW_VERSION_SUB,
+ QLCNIC_CRB_DEV_NPAR_STATE,
+ QLCNIC_FW_IMG_VALID,
+ QLCNIC_CMDPEG_STATE,
+ QLCNIC_RCVPEG_STATE,
+ QLCNIC_ASIC_TEMP,
+ QLCNIC_FW_API,
+ QLCNIC_DRV_OP_MODE,
+ QLCNIC_FLASH_LOCK,
+ QLCNIC_FLASH_UNLOCK,
+};
+
+/* Read from an address offset from BAR0, existing registers */
+#define QLC_SHARED_REG_RD32(a, addr) \
+ readl(((a)->ahw->pci_base0) + ((a)->ahw->reg_tbl[addr]))
+
+/* Write to an address offset from BAR0, existing registers */
+#define QLC_SHARED_REG_WR32(a, addr, value) \
+ writel(value, ((a)->ahw->pci_base0) + ((a)->ahw->reg_tbl[addr]))
+
+/* Read from a direct address offset from BAR0, additional registers */
+#define QLCRDX(ahw, addr) \
+ readl(((ahw)->pci_base0) + ((ahw)->ext_reg_tbl[addr]))
+
+/* Write to a direct address offset from BAR0, additional registers */
+#define QLCWRX(ahw, addr, value) \
+ writel(value, (((ahw)->pci_base0) + ((ahw)->ext_reg_tbl[addr])))
+
+#define QLCNIC_CMD_CONFIGURE_IP_ADDR 0x1
+#define QLCNIC_CMD_CONFIG_INTRPT 0x2
+#define QLCNIC_CMD_CREATE_RX_CTX 0x7
+#define QLCNIC_CMD_DESTROY_RX_CTX 0x8
+#define QLCNIC_CMD_CREATE_TX_CTX 0x9
+#define QLCNIC_CMD_DESTROY_TX_CTX 0xa
+#define QLCNIC_CMD_CONFIGURE_LRO 0xC
+#define QLCNIC_CMD_CONFIGURE_MAC_LEARNING 0xD
+#define QLCNIC_CMD_GET_STATISTICS 0xF
+#define QLCNIC_CMD_INTRPT_TEST 0x11
+#define QLCNIC_CMD_SET_MTU 0x12
+#define QLCNIC_CMD_READ_PHY 0x13
+#define QLCNIC_CMD_WRITE_PHY 0x14
+#define QLCNIC_CMD_READ_HW_REG 0x15
+#define QLCNIC_CMD_GET_FLOW_CTL 0x16
+#define QLCNIC_CMD_SET_FLOW_CTL 0x17
+#define QLCNIC_CMD_READ_MAX_MTU 0x18
+#define QLCNIC_CMD_READ_MAX_LRO 0x19
+#define QLCNIC_CMD_MAC_ADDRESS 0x1f
+#define QLCNIC_CMD_GET_PCI_INFO 0x20
+#define QLCNIC_CMD_GET_NIC_INFO 0x21
+#define QLCNIC_CMD_SET_NIC_INFO 0x22
+#define QLCNIC_CMD_GET_ESWITCH_CAPABILITY 0x24
+#define QLCNIC_CMD_TOGGLE_ESWITCH 0x25
+#define QLCNIC_CMD_GET_ESWITCH_STATUS 0x26
+#define QLCNIC_CMD_SET_PORTMIRRORING 0x27
+#define QLCNIC_CMD_CONFIGURE_ESWITCH 0x28
+#define QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG 0x29
+#define QLCNIC_CMD_GET_ESWITCH_STATS 0x2a
+#define QLCNIC_CMD_CONFIG_PORT 0x2e
+#define QLCNIC_CMD_TEMP_SIZE 0x2f
+#define QLCNIC_CMD_GET_TEMP_HDR 0x30
+#define QLCNIC_CMD_BC_EVENT_SETUP 0x31
+#define QLCNIC_CMD_CONFIG_VPORT 0x32
+#define QLCNIC_CMD_DCB_QUERY_CAP 0x34
+#define QLCNIC_CMD_DCB_QUERY_PARAM 0x35
+#define QLCNIC_CMD_GET_MAC_STATS 0x37
+#define QLCNIC_CMD_82XX_SET_DRV_VER 0x38
+#define QLCNIC_CMD_MQ_TX_CONFIG_INTR 0x39
+#define QLCNIC_CMD_GET_LED_STATUS 0x3C
+#define QLCNIC_CMD_CONFIGURE_RSS 0x41
+#define QLCNIC_CMD_CONFIG_INTR_COAL 0x43
+#define QLCNIC_CMD_CONFIGURE_LED 0x44
+#define QLCNIC_CMD_CONFIG_MAC_VLAN 0x45
+#define QLCNIC_CMD_GET_LINK_EVENT 0x48
+#define QLCNIC_CMD_CONFIGURE_MAC_RX_MODE 0x49
+#define QLCNIC_CMD_CONFIGURE_HW_LRO 0x4A
+#define QLCNIC_CMD_SET_INGRESS_ENCAP 0x4E
+#define QLCNIC_CMD_INIT_NIC_FUNC 0x60
+#define QLCNIC_CMD_STOP_NIC_FUNC 0x61
+#define QLCNIC_CMD_IDC_ACK 0x63
+#define QLCNIC_CMD_SET_PORT_CONFIG 0x66
+#define QLCNIC_CMD_GET_PORT_CONFIG 0x67
+#define QLCNIC_CMD_GET_LINK_STATUS 0x68
+#define QLCNIC_CMD_SET_LED_CONFIG 0x69
+#define QLCNIC_CMD_GET_LED_CONFIG 0x6A
+#define QLCNIC_CMD_83XX_SET_DRV_VER 0x6F
+#define QLCNIC_CMD_ADD_RCV_RINGS 0x0B
+
+#define QLCNIC_INTRPT_INTX 1
+#define QLCNIC_INTRPT_MSIX 3
+#define QLCNIC_INTRPT_ADD 1
+#define QLCNIC_INTRPT_DEL 2
+
+#define QLCNIC_GET_CURRENT_MAC 1
+#define QLCNIC_SET_STATION_MAC 2
+#define QLCNIC_GET_DEFAULT_MAC 3
+#define QLCNIC_GET_FAC_DEF_MAC 4
+#define QLCNIC_SET_FAC_DEF_MAC 5
+
+#define QLCNIC_MBX_LINK_EVENT 0x8001
+#define QLCNIC_MBX_BC_EVENT 0x8002
+#define QLCNIC_MBX_COMP_EVENT 0x8100
+#define QLCNIC_MBX_REQUEST_EVENT 0x8101
+#define QLCNIC_MBX_TIME_EXTEND_EVENT 0x8102
+#define QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT 0x8110
+#define QLCNIC_MBX_SFP_INSERT_EVENT 0x8130
+#define QLCNIC_MBX_SFP_REMOVE_EVENT 0x8131
+
+struct qlcnic_mailbox_metadata {
+ u32 cmd;
+ u32 in_args;
+ u32 out_args;
+};
+
+/* Mailbox ownership */
+#define QLCNIC_GET_OWNER(val) ((val) & (BIT_0 | BIT_1))
+
+#define QLCNIC_SET_OWNER 1
+#define QLCNIC_CLR_OWNER 0
+#define QLCNIC_MBX_TIMEOUT 5000
+
+#define QLCNIC_MBX_RSP_OK 1
+#define QLCNIC_MBX_PORT_RSP_OK 0x1a
+#define QLCNIC_MBX_ASYNC_EVENT BIT_15
+
+/* Set HW Tx ring limit for 82xx adapter. */
+#define QLCNIC_MAX_HW_TX_RINGS 8
+#define QLCNIC_MAX_HW_VNIC_TX_RINGS 4
+#define QLCNIC_MAX_TX_RINGS 8
+#define QLCNIC_MAX_SDS_RINGS 8
+
+struct qlcnic_pci_info;
+struct qlcnic_info;
+struct qlcnic_cmd_args;
+struct ethtool_stats;
+struct pci_device_id;
+struct qlcnic_host_sds_ring;
+struct qlcnic_host_tx_ring;
+struct qlcnic_hardware_context;
+struct qlcnic_adapter;
+struct qlcnic_fw_dump;
+
+int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong, int *);
+int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32);
+int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int);
+int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
+int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
+ struct net_device *netdev);
+void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *);
+void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
+ u64 *uaddr, u16 vlan_id);
+int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *,
+ struct ethtool_coalesce *);
+int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *);
+int qlcnic_82xx_config_rss(struct qlcnic_adapter *adapter, int);
+void qlcnic_82xx_config_ipaddr(struct qlcnic_adapter *adapter,
+ __be32, int);
+int qlcnic_82xx_linkevent_request(struct qlcnic_adapter *adapter, int);
+void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
+int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8);
+int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *, u8);
+void qlcnic_82xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+void qlcnic_82xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *);
+int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *, int);
+int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *, u8);
+int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *);
+int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *tx_ring, int);
+void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *);
+void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
+int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
+int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*, u8);
+int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
+int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
+int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*);
+int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *,
+ struct qlcnic_adapter *, u32);
+int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32);
+int qlcnic_82xx_get_board_info(struct qlcnic_adapter *);
+int qlcnic_82xx_config_led(struct qlcnic_adapter *, u32, u32);
+void qlcnic_82xx_get_func_no(struct qlcnic_adapter *);
+int qlcnic_82xx_api_lock(struct qlcnic_adapter *);
+void qlcnic_82xx_api_unlock(struct qlcnic_adapter *);
+void qlcnic_82xx_napi_enable(struct qlcnic_adapter *);
+void qlcnic_82xx_napi_disable(struct qlcnic_adapter *);
+void qlcnic_82xx_napi_del(struct qlcnic_adapter *);
+int qlcnic_82xx_shutdown(struct pci_dev *);
+int qlcnic_82xx_resume(struct qlcnic_adapter *);
+void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed);
+void qlcnic_fw_poll_work(struct work_struct *work);
+
+u32 qlcnic_82xx_get_saved_state(void *, u32);
+void qlcnic_82xx_set_saved_state(void *, u32, u32);
+void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *);
+u32 qlcnic_82xx_get_cap_size(void *, int);
+void qlcnic_82xx_set_sys_info(void *, int, u32);
+void qlcnic_82xx_store_cap_mask(void *, u32);
+#endif /* __QLCNIC_HW_H */
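A minimal usage sketch for the shared-register macros defined above: the enum index resolves through ahw->reg_tbl, so the same symbolic register can map to different BAR0 offsets on 82xx and 83xx. PHAN_INITIALIZE_COMPLETE comes from the existing driver headers; the wrapper function name is made up.

/* Sketch: QLC_SHARED_REG_RD32(adapter, QLCNIC_CMDPEG_STATE) expands to
 * readl(adapter->ahw->pci_base0 + adapter->ahw->reg_tbl[QLCNIC_CMDPEG_STATE]).
 */
static int example_cmd_peg_done(struct qlcnic_adapter *adapter)
{
	u32 state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CMDPEG_STATE);

	return state == PHAN_INITIALIZE_COMPLETE;
}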
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index de79cde233d..c4262c23ed7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -1,15 +1,12 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
-#include <linux/netdevice.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/if_vlan.h>
#include "qlcnic.h"
+#include "qlcnic_hw.h"
struct crb_addr_pair {
u32 addr;
@@ -130,12 +127,14 @@ void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
}
}
-void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
{
struct qlcnic_cmd_buffer *cmd_buf;
struct qlcnic_skb_frag *buffrag;
int i, j;
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+
+ spin_lock(&tx_ring->tx_clean_lock);
cmd_buf = tx_ring->cmd_buf_arr;
for (i = 0; i < tx_ring->num_desc; i++) {
@@ -145,7 +144,7 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
buffrag->length, PCI_DMA_TODEVICE);
buffrag->dma = 0ULL;
}
- for (j = 0; j < cmd_buf->frag_count; j++) {
+ for (j = 1; j < cmd_buf->frag_count; j++) {
buffrag++;
if (buffrag->dma) {
pci_unmap_page(adapter->pdev, buffrag->dma,
@@ -160,19 +159,20 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
}
cmd_buf++;
}
+
+ spin_unlock(&tx_ring->tx_clean_lock);
}
void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
{
struct qlcnic_recv_context *recv_ctx;
struct qlcnic_host_rds_ring *rds_ring;
- struct qlcnic_host_tx_ring *tx_ring;
int ring;
recv_ctx = adapter->recv_ctx;
if (recv_ctx->rds_rings == NULL)
- goto skip_rds;
+ return;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
@@ -180,16 +180,6 @@ void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
rds_ring->rx_buf_arr = NULL;
}
kfree(recv_ctx->rds_rings);
-
-skip_rds:
- if (adapter->tx_ring == NULL)
- return;
-
- tx_ring = adapter->tx_ring;
- vfree(tx_ring->cmd_buf_arr);
- tx_ring->cmd_buf_arr = NULL;
- kfree(adapter->tx_ring);
- adapter->tx_ring = NULL;
}
int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
@@ -197,39 +187,16 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
struct qlcnic_recv_context *recv_ctx;
struct qlcnic_host_rds_ring *rds_ring;
struct qlcnic_host_sds_ring *sds_ring;
- struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_rx_buffer *rx_buf;
- int ring, i, size;
-
- struct qlcnic_cmd_buffer *cmd_buf_arr;
- struct net_device *netdev = adapter->netdev;
-
- size = sizeof(struct qlcnic_host_tx_ring);
- tx_ring = kzalloc(size, GFP_KERNEL);
- if (tx_ring == NULL) {
- dev_err(&netdev->dev, "failed to allocate tx ring struct\n");
- return -ENOMEM;
- }
- adapter->tx_ring = tx_ring;
-
- tx_ring->num_desc = adapter->num_txd;
- tx_ring->txq = netdev_get_tx_queue(netdev, 0);
-
- cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
- if (cmd_buf_arr == NULL) {
- dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n");
- goto err_out;
- }
- tx_ring->cmd_buf_arr = cmd_buf_arr;
+ int ring, i;
recv_ctx = adapter->recv_ctx;
- size = adapter->max_rds_rings * sizeof(struct qlcnic_host_rds_ring);
- rds_ring = kzalloc(size, GFP_KERNEL);
- if (rds_ring == NULL) {
- dev_err(&netdev->dev, "failed to allocate rds ring struct\n");
+ rds_ring = kcalloc(adapter->max_rds_rings,
+ sizeof(struct qlcnic_host_rds_ring), GFP_KERNEL);
+ if (rds_ring == NULL)
goto err_out;
- }
+
recv_ctx->rds_rings = rds_ring;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
@@ -255,11 +222,9 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
break;
}
rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
- if (rds_ring->rx_buf_arr == NULL) {
- dev_err(&netdev->dev, "Failed to allocate "
- "rx buffer ring %d\n", ring);
+ if (rds_ring->rx_buf_arr == NULL)
goto err_out;
- }
+
INIT_LIST_HEAD(&rds_ring->free_list);
/*
* Now go through all of them, set reference handles
@@ -275,12 +240,18 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
spin_lock_init(&rds_ring->lock);
}
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
sds_ring->irq = adapter->msix_entries[ring].vector;
sds_ring->adapter = adapter;
sds_ring->num_desc = adapter->num_rxd;
-
+ if (qlcnic_82xx_check(adapter)) {
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ sds_ring->tx_ring = &adapter->tx_ring[ring];
+ else
+ sds_ring->tx_ring = &adapter->tx_ring[0];
+ }
for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
INIT_LIST_HEAD(&sds_ring->free_list[i]);
}
@@ -325,11 +296,11 @@ static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
{
long timeout = 0;
long done = 0;
+ int err = 0;
cond_resched();
-
while (done == 0) {
- done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS);
+ done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS, &err);
done &= 2;
if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) {
dev_err(&adapter->pdev->dev,
@@ -344,6 +315,8 @@ static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
static int do_rom_fast_read(struct qlcnic_adapter *adapter,
u32 addr, u32 *valp)
{
+ int err = 0;
+
QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr);
QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3);
@@ -357,7 +330,9 @@ static int do_rom_fast_read(struct qlcnic_adapter *adapter,
udelay(10);
QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
- *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA);
+ *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA, &err);
+ if (err == -EIO)
+ return err;
return 0;
}
@@ -409,15 +384,15 @@ int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp)
int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
{
- int addr, val;
+ int addr, err = 0;
int i, n, init_delay;
struct crb_addr_pair *buf;
unsigned offset;
- u32 off;
+ u32 off, val;
struct pci_dev *pdev = adapter->pdev;
- QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
- QLCWR32(adapter, CRB_RCVPEG_STATE, 0);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, 0);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_RCVPEG_STATE, 0);
/* Halt all the individual PEGs and other blocks */
/* disable all I2Q */
@@ -442,7 +417,9 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
QLCWR32(adapter, QLCNIC_CRB_NIU + 0xb0000, 0x00);
/* halt sre */
- val = QLCRD32(adapter, QLCNIC_CRB_SRE + 0x1000);
+ val = QLCRD32(adapter, QLCNIC_CRB_SRE + 0x1000, &err);
+ if (err == -EIO)
+ return err;
QLCWR32(adapter, QLCNIC_CRB_SRE + 0x1000, val & (~(0x1)));
/* halt epg */
@@ -482,10 +459,8 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
}
buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
- if (buf == NULL) {
- dev_err(&pdev->dev, "Unable to calloc memory for rom read.\n");
+ if (buf == NULL)
return -ENOMEM;
- }
for (i = 0; i < n; i++) {
if (qlcnic_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
@@ -564,8 +539,8 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0);
msleep(1);
- QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
- QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
return 0;
}
@@ -576,7 +551,7 @@ static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT;
do {
- val = QLCRD32(adapter, CRB_CMDPEG_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CMDPEG_STATE);
switch (val) {
case PHAN_INITIALIZE_COMPLETE:
@@ -592,7 +567,8 @@ static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
} while (--retries);
- QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE,
+ PHAN_INITIALIZE_FAILED);
out_err:
dev_err(&adapter->pdev->dev, "Command Peg initialization not "
@@ -607,7 +583,7 @@ qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
int retries = QLCNIC_RCVPEG_CHECK_RETRY_COUNT;
do {
- val = QLCRD32(adapter, CRB_RCVPEG_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_RCVPEG_STATE);
if (val == PHAN_PEG_RCV_INITIALIZED)
return 0;
@@ -638,7 +614,7 @@ qlcnic_check_fw_status(struct qlcnic_adapter *adapter)
if (err)
return err;
- QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
return err;
}
@@ -649,7 +625,7 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
int timeo;
u32 val;
- val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO);
val = QLC_DEV_GET_DRV(val, adapter->portnum);
if ((val & 0x3) != QLCNIC_TYPE_NIC) {
dev_err(&adapter->pdev->dev,
@@ -689,11 +665,9 @@ static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region,
}
entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header);
- flt_entry = (struct qlcnic_flt_entry *)vzalloc(entry_size);
- if (flt_entry == NULL) {
- dev_warn(&adapter->pdev->dev, "error allocating memory\n");
+ flt_entry = vzalloc(entry_size);
+ if (flt_entry == NULL)
return -EIO;
- }
ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION +
sizeof(struct qlcnic_flt_header),
@@ -762,10 +736,12 @@ qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
static int
qlcnic_has_mn(struct qlcnic_adapter *adapter)
{
- u32 capability;
- capability = 0;
+ u32 capability = 0;
+ int err = 0;
- capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
+ capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY, &err);
+ if (err == -EIO)
+ return err;
if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
return 1;
@@ -1096,11 +1072,13 @@ qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter)
u32 heartbeat, ret = -EIO;
int retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
- adapter->heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+ adapter->heartbeat = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_PEG_ALIVE_COUNTER);
do {
msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS);
- heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+ heartbeat = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_PEG_ALIVE_COUNTER);
if (heartbeat != adapter->heartbeat) {
ret = QLCNIC_RCODE_SUCCESS;
break;
@@ -1270,7 +1248,7 @@ qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
return -EINVAL;
}
- QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, QLCNIC_BDINFO_MAGIC);
return 0;
}
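Most conversions in this file follow one pattern: QLCRD32() now takes an int *err out-parameter so a failed PCI read can be detected independently of the returned register value. A hedged sketch of the new calling convention (the wrapper function name is illustrative):

/* Sketch of the new QLCRD32() convention used above: check *err for -EIO
 * instead of relying only on the value read back.
 */
static int example_read_tune_capability(struct qlcnic_adapter *adapter,
					u32 *val)
{
	int err = 0;

	*val = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY, &err);
	if (err == -EIO)
		return err;

	return 0;
}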
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 6f82812d0fa..e45bf09af0c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -1,23 +1,31 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>
+#include <net/checksum.h>
#include "qlcnic.h"
-#define QLCNIC_MAC_HASH(MAC)\
- ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
+#define QLCNIC_TX_ETHER_PKT 0x01
+#define QLCNIC_TX_TCP_PKT 0x02
+#define QLCNIC_TX_UDP_PKT 0x03
+#define QLCNIC_TX_IP_PKT 0x04
+#define QLCNIC_TX_TCP_LSO 0x05
+#define QLCNIC_TX_TCP_LSO6 0x06
+#define QLCNIC_TX_ENCAP_PKT 0x07
+#define QLCNIC_TX_ENCAP_LSO 0x08
+#define QLCNIC_TX_TCPV6_PKT 0x0b
+#define QLCNIC_TX_UDPV6_PKT 0x0c
-#define TX_ETHER_PKT 0x01
-#define TX_TCP_PKT 0x02
-#define TX_UDP_PKT 0x03
-#define TX_IP_PKT 0x04
-#define TX_TCP_LSO 0x05
-#define TX_TCP_LSO6 0x06
-#define TX_TCPV6_PKT 0x0b
-#define TX_UDPV6_PKT 0x0c
-#define FLAGS_VLAN_TAGGED 0x10
-#define FLAGS_VLAN_OOB 0x40
+#define QLCNIC_FLAGS_VLAN_TAGGED 0x10
+#define QLCNIC_FLAGS_VLAN_OOB 0x40
#define qlcnic_set_tx_vlan_tci(cmd_desc, v) \
(cmd_desc)->vlan_TCI = cpu_to_le16(v);
@@ -84,6 +92,8 @@
#define qlcnic_get_lro_sts_mss(sts_data1) \
((sts_data1 >> 32) & 0x0FFFF)
+#define qlcnic_83xx_get_lro_sts_mss(sts) ((sts) & 0xffff)
+
/* opcode field in status_desc */
#define QLCNIC_SYN_OFFLOAD 0x03
#define QLCNIC_RXPKT_DESC 0x04
@@ -91,18 +101,180 @@
#define QLCNIC_RESPONSE_DESC 0x05
#define QLCNIC_LRO_DESC 0x12
+#define QLCNIC_TX_POLL_BUDGET 128
+#define QLCNIC_TCP_HDR_SIZE 20
+#define QLCNIC_TCP_TS_OPTION_SIZE 12
+#define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63)
+#define QLCNIC_DESC_OWNER_FW cpu_to_le64(STATUS_OWNER_PHANTOM)
+
+#define QLCNIC_TCP_TS_HDR_SIZE (QLCNIC_TCP_HDR_SIZE + QLCNIC_TCP_TS_OPTION_SIZE)
+
/* for status field in status_desc */
#define STATUS_CKSUM_LOOP 0
#define STATUS_CKSUM_OK 2
-static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
- u64 uaddr, __le16 vlan_id,
- struct qlcnic_host_tx_ring *tx_ring)
+#define qlcnic_83xx_pktln(sts) ((sts >> 32) & 0x3FFF)
+#define qlcnic_83xx_hndl(sts) ((sts >> 48) & 0x7FFF)
+#define qlcnic_83xx_csum_status(sts) ((sts >> 39) & 7)
+#define qlcnic_83xx_opcode(sts) ((sts >> 42) & 0xF)
+#define qlcnic_83xx_vlan_tag(sts) (((sts) >> 48) & 0xFFFF)
+#define qlcnic_83xx_lro_pktln(sts) (((sts) >> 32) & 0x3FFF)
+#define qlcnic_83xx_l2_hdr_off(sts) (((sts) >> 16) & 0xFF)
+#define qlcnic_83xx_l4_hdr_off(sts) (((sts) >> 24) & 0xFF)
+#define qlcnic_83xx_pkt_cnt(sts) (((sts) >> 16) & 0x7)
+#define qlcnic_83xx_is_tstamp(sts) (((sts) >> 40) & 1)
+#define qlcnic_83xx_is_psh_bit(sts) (((sts) >> 41) & 1)
+#define qlcnic_83xx_is_ip_align(sts) (((sts) >> 46) & 1)
+#define qlcnic_83xx_has_vlan_tag(sts) (((sts) >> 47) & 1)
+
+static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
+ int max);
+
+static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
+ struct qlcnic_host_rds_ring *,
+ u16, u16);
+
+static inline u8 qlcnic_mac_hash(u64 mac, u16 vlan)
+{
+ return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff) ^ (vlan & 0xff));
+}
+
+static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
+ u16 handle, u8 ring_id)
+{
+ if (qlcnic_83xx_check(adapter))
+ return handle | (ring_id << 15);
+ else
+ return handle;
+}
+
+static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data)
+{
+ return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0;
+}
+
+static void qlcnic_delete_rx_list_mac(struct qlcnic_adapter *adapter,
+ struct qlcnic_filter *fil,
+ void *addr, u16 vlan_id)
+{
+ int ret;
+ u8 op;
+
+ op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
+ ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
+ if (ret)
+ return;
+
+ op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
+ ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
+ if (!ret) {
+ hlist_del(&fil->fnode);
+ adapter->rx_fhash.fnum--;
+ }
+}
+
+static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head,
+ void *addr, u16 vlan_id)
+{
+ struct qlcnic_filter *tmp_fil = NULL;
+ struct hlist_node *n;
+
+ hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
+ if (ether_addr_equal(tmp_fil->faddr, addr) &&
+ tmp_fil->vlan_id == vlan_id)
+ return tmp_fil;
+ }
+
+ return NULL;
+}
+
+static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
+ struct sk_buff *skb, int loopback_pkt, u16 vlan_id)
+{
+ struct ethhdr *phdr = (struct ethhdr *)(skb->data);
+ struct qlcnic_filter *fil, *tmp_fil;
+ struct hlist_head *head;
+ unsigned long time;
+ u64 src_addr = 0;
+ u8 hindex, op;
+ int ret;
+
+ if (!qlcnic_sriov_pf_check(adapter) || (vlan_id == 0xffff))
+ vlan_id = 0;
+
+ memcpy(&src_addr, phdr->h_source, ETH_ALEN);
+ hindex = qlcnic_mac_hash(src_addr, vlan_id) &
+ (adapter->fhash.fbucket_size - 1);
+
+ if (loopback_pkt) {
+ if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax)
+ return;
+
+ head = &(adapter->rx_fhash.fhead[hindex]);
+
+ tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
+ if (tmp_fil) {
+ time = tmp_fil->ftime;
+ if (time_after(jiffies, QLCNIC_READD_AGE * HZ + time))
+ tmp_fil->ftime = jiffies;
+ return;
+ }
+
+ fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
+ if (!fil)
+ return;
+
+ fil->ftime = jiffies;
+ memcpy(fil->faddr, &src_addr, ETH_ALEN);
+ fil->vlan_id = vlan_id;
+ spin_lock(&adapter->rx_mac_learn_lock);
+ hlist_add_head(&(fil->fnode), head);
+ adapter->rx_fhash.fnum++;
+ spin_unlock(&adapter->rx_mac_learn_lock);
+ } else {
+ head = &adapter->fhash.fhead[hindex];
+
+ spin_lock(&adapter->mac_learn_lock);
+
+ tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
+ if (tmp_fil) {
+ op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
+ ret = qlcnic_sre_macaddr_change(adapter,
+ (u8 *)&src_addr,
+ vlan_id, op);
+ if (!ret) {
+ hlist_del(&tmp_fil->fnode);
+ adapter->fhash.fnum--;
+ }
+
+ spin_unlock(&adapter->mac_learn_lock);
+
+ return;
+ }
+
+ spin_unlock(&adapter->mac_learn_lock);
+
+ head = &adapter->rx_fhash.fhead[hindex];
+
+ spin_lock(&adapter->rx_mac_learn_lock);
+
+ tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
+ if (tmp_fil)
+ qlcnic_delete_rx_list_mac(adapter, tmp_fil, &src_addr,
+ vlan_id);
+
+ spin_unlock(&adapter->rx_mac_learn_lock);
+ }
+}
+
+void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
+ u16 vlan_id)
{
struct cmd_desc_type0 *hwdesc;
struct qlcnic_nic_req *req;
struct qlcnic_mac_req *mac_req;
struct qlcnic_vlan_req *vlan_req;
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
u32 producer;
u64 word;
@@ -118,100 +290,200 @@ static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
- memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
+ memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);
vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
- vlan_req->vlan_id = vlan_id;
+ vlan_req->vlan_id = cpu_to_le16(vlan_id);
tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
smp_mb();
}
static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
- struct qlcnic_host_tx_ring *tx_ring,
struct cmd_desc_type0 *first_desc,
struct sk_buff *skb)
{
+ struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
struct ethhdr *phdr = (struct ethhdr *)(skb->data);
+ u16 protocol = ntohs(skb->protocol);
struct qlcnic_filter *fil, *tmp_fil;
- struct hlist_node *tmp_hnode, *n;
struct hlist_head *head;
+ struct hlist_node *n;
u64 src_addr = 0;
- __le16 vlan_id = 0;
- u8 hindex;
+ u16 vlan_id = 0;
+ u8 hindex, hval;
if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
return;
- if (adapter->fhash.fnum >= adapter->fhash.fmax)
- return;
+ if (adapter->flags & QLCNIC_VLAN_FILTERING) {
+ if (protocol == ETH_P_8021Q) {
+ vh = (struct vlan_ethhdr *)skb->data;
+ vlan_id = ntohs(vh->h_vlan_TCI);
+ } else if (vlan_tx_tag_present(skb)) {
+ vlan_id = vlan_tx_tag_get(skb);
+ }
+ }
- /* Only NPAR capable devices support vlan based learning*/
- if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
- vlan_id = first_desc->vlan_TCI;
memcpy(&src_addr, phdr->h_source, ETH_ALEN);
- hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
+ hval = qlcnic_mac_hash(src_addr, vlan_id);
+ hindex = hval & (adapter->fhash.fbucket_size - 1);
head = &(adapter->fhash.fhead[hindex]);
- hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
- if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
- tmp_fil->vlan_id == vlan_id) {
-
+ hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
+ if (ether_addr_equal(tmp_fil->faddr, (u8 *)&src_addr) &&
+ tmp_fil->vlan_id == vlan_id) {
if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
- qlcnic_change_filter(adapter, src_addr, vlan_id,
- tx_ring);
+ qlcnic_change_filter(adapter, &src_addr,
+ vlan_id);
tmp_fil->ftime = jiffies;
return;
}
}
+ if (unlikely(adapter->fhash.fnum >= adapter->fhash.fmax)) {
+ adapter->stats.mac_filter_limit_overrun++;
+ return;
+ }
+
fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
if (!fil)
return;
- qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
-
+ qlcnic_change_filter(adapter, &src_addr, vlan_id);
fil->ftime = jiffies;
fil->vlan_id = vlan_id;
memcpy(fil->faddr, &src_addr, ETH_ALEN);
-
spin_lock(&adapter->mac_learn_lock);
-
hlist_add_head(&(fil->fnode), head);
adapter->fhash.fnum++;
-
spin_unlock(&adapter->mac_learn_lock);
}
+#define QLCNIC_ENCAP_VXLAN_PKT BIT_0
+#define QLCNIC_ENCAP_OUTER_L3_IP6 BIT_1
+#define QLCNIC_ENCAP_INNER_L3_IP6 BIT_2
+#define QLCNIC_ENCAP_INNER_L4_UDP BIT_3
+#define QLCNIC_ENCAP_DO_L3_CSUM BIT_4
+#define QLCNIC_ENCAP_DO_L4_CSUM BIT_5
+
+static int qlcnic_tx_encap_pkt(struct qlcnic_adapter *adapter,
+ struct cmd_desc_type0 *first_desc,
+ struct sk_buff *skb,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ u8 opcode = 0, inner_hdr_len = 0, outer_hdr_len = 0, total_hdr_len = 0;
+ int copied, copy_len, descr_size;
+ u32 producer = tx_ring->producer;
+ struct cmd_desc_type0 *hwdesc;
+ u16 flags = 0, encap_descr = 0;
+
+ opcode = QLCNIC_TX_ETHER_PKT;
+ encap_descr = QLCNIC_ENCAP_VXLAN_PKT;
+
+ if (skb_is_gso(skb)) {
+ inner_hdr_len = skb_inner_transport_header(skb) +
+ inner_tcp_hdrlen(skb) -
+ skb_inner_mac_header(skb);
+
+ /* VXLAN header size = 8 */
+ outer_hdr_len = skb_transport_offset(skb) + 8 +
+ sizeof(struct udphdr);
+ first_desc->outer_hdr_length = outer_hdr_len;
+ total_hdr_len = inner_hdr_len + outer_hdr_len;
+ encap_descr |= QLCNIC_ENCAP_DO_L3_CSUM |
+ QLCNIC_ENCAP_DO_L4_CSUM;
+ first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+ first_desc->hdr_length = inner_hdr_len;
+
+ /* Copy inner and outer headers in Tx descriptor(s)
+ * If total_hdr_len > cmd_desc_type0, use multiple
+ * descriptors
+ */
+ copied = 0;
+ descr_size = (int)sizeof(struct cmd_desc_type0);
+ while (copied < total_hdr_len) {
+ copy_len = min(descr_size, (total_hdr_len - copied));
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+ skb_copy_from_linear_data_offset(skb, copied,
+ (char *)hwdesc,
+ copy_len);
+ copied += copy_len;
+ producer = get_next_index(producer, tx_ring->num_desc);
+ }
+
+ tx_ring->producer = producer;
+
+ /* Make sure updated tx_ring->producer is visible
+ * for qlcnic_tx_avail()
+ */
+ smp_mb();
+ adapter->stats.encap_lso_frames++;
+
+ opcode = QLCNIC_TX_ENCAP_LSO;
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (inner_ip_hdr(skb)->version == 6) {
+ if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
+ encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
+ } else {
+ if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
+ encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
+ }
+
+ adapter->stats.encap_tx_csummed++;
+ opcode = QLCNIC_TX_ENCAP_PKT;
+ }
+
+ /* Prepare first 16 bits of byte offset 16 of Tx descriptor */
+ if (ip_hdr(skb)->version == 6)
+ encap_descr |= QLCNIC_ENCAP_OUTER_L3_IP6;
+
+ /* outer IP header's size in 32-bit words */
+ encap_descr |= (skb_network_header_len(skb) >> 2) << 6;
+
+ /* outer IP header offset */
+ encap_descr |= skb_network_offset(skb) << 10;
+ first_desc->encap_descr = cpu_to_le16(encap_descr);
+
+ first_desc->tcp_hdr_offset = skb_inner_transport_header(skb) -
+ skb->data;
+ first_desc->ip_hdr_offset = skb_inner_network_offset(skb);
+
+ qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+
+ return 0;
+}
+
static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
- struct cmd_desc_type0 *first_desc, struct sk_buff *skb)
+ struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
+ struct qlcnic_host_tx_ring *tx_ring)
{
u8 l4proto, opcode = 0, hdr_len = 0;
u16 flags = 0, vlan_tci = 0;
int copied, offset, copy_len, size;
struct cmd_desc_type0 *hwdesc;
struct vlan_ethhdr *vh;
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
u16 protocol = ntohs(skb->protocol);
u32 producer = tx_ring->producer;
if (protocol == ETH_P_8021Q) {
vh = (struct vlan_ethhdr *)skb->data;
- flags = FLAGS_VLAN_TAGGED;
+ flags = QLCNIC_FLAGS_VLAN_TAGGED;
vlan_tci = ntohs(vh->h_vlan_TCI);
protocol = ntohs(vh->h_vlan_encapsulated_proto);
} else if (vlan_tx_tag_present(skb)) {
- flags = FLAGS_VLAN_OOB;
+ flags = QLCNIC_FLAGS_VLAN_OOB;
vlan_tci = vlan_tx_tag_get(skb);
}
- if (unlikely(adapter->pvid)) {
+ if (unlikely(adapter->tx_pvid)) {
if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
return -EIO;
if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
goto set_flags;
- flags = FLAGS_VLAN_OOB;
- vlan_tci = adapter->pvid;
+ flags = QLCNIC_FLAGS_VLAN_OOB;
+ vlan_tci = adapter->tx_pvid;
}
set_flags:
qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
@@ -221,26 +493,26 @@ set_flags:
flags |= BIT_0;
memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
}
- opcode = TX_ETHER_PKT;
- if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
- skb_shinfo(skb)->gso_size > 0) {
+ opcode = QLCNIC_TX_ETHER_PKT;
+ if (skb_is_gso(skb)) {
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
- first_desc->total_hdr_length = hdr_len;
- opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
+ first_desc->hdr_length = hdr_len;
+ opcode = (protocol == ETH_P_IPV6) ? QLCNIC_TX_TCP_LSO6 :
+ QLCNIC_TX_TCP_LSO;
/* For LSO, we need to copy the MAC/IP/TCP headers into
* the descriptor ring */
copied = 0;
offset = 2;
- if (flags & FLAGS_VLAN_OOB) {
- first_desc->total_hdr_length += VLAN_HLEN;
+ if (flags & QLCNIC_FLAGS_VLAN_OOB) {
+ first_desc->hdr_length += VLAN_HLEN;
first_desc->tcp_hdr_offset = VLAN_HLEN;
first_desc->ip_hdr_offset = VLAN_HLEN;
/* Only in case of TSO on vlan device */
- flags |= FLAGS_VLAN_TAGGED;
+ flags |= QLCNIC_FLAGS_VLAN_TAGGED;
/* Create a TSO vlan header template for firmware */
hwdesc = &tx_ring->desc_head[producer];
@@ -284,16 +556,16 @@ set_flags:
l4proto = ip_hdr(skb)->protocol;
if (l4proto == IPPROTO_TCP)
- opcode = TX_TCP_PKT;
+ opcode = QLCNIC_TX_TCP_PKT;
else if (l4proto == IPPROTO_UDP)
- opcode = TX_UDP_PKT;
+ opcode = QLCNIC_TX_UDP_PKT;
} else if (protocol == ETH_P_IPV6) {
l4proto = ipv6_hdr(skb)->nexthdr;
if (l4proto == IPPROTO_TCP)
- opcode = TX_TCPV6_PKT;
+ opcode = QLCNIC_TX_TCPV6_PKT;
else if (l4proto == IPPROTO_UDP)
- opcode = TX_UDPV6_PKT;
+ opcode = QLCNIC_TX_UDPV6_PKT;
}
}
first_desc->tcp_hdr_offset += skb_transport_offset(skb);
@@ -375,7 +647,7 @@ static inline void qlcnic_clear_cmddesc(u64 *desc)
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_cmd_buffer *pbuf;
struct qlcnic_skb_frag *buffrag;
struct cmd_desc_type0 *hwdesc, *first_desc;
@@ -383,11 +655,11 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
struct ethhdr *phdr;
int i, k, frag_count, delta = 0;
u32 producer, num_txd;
-
- num_txd = tx_ring->num_desc;
+ u16 protocol;
+ bool l4_is_udp = false;
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
- netif_stop_queue(netdev);
+ netif_tx_stop_all_queues(netdev);
return NETDEV_TX_BUSY;
}
@@ -397,7 +669,11 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
goto drop_packet;
}
+ tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
+ num_txd = tx_ring->num_desc;
+
frag_count = skb_shinfo(skb)->nr_frags + 1;
+
/* 14 frags supported for normal packet and
* 32 frags supported for TSO packet
*/
@@ -412,11 +688,11 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
- netif_stop_queue(netdev);
+ netif_tx_stop_queue(tx_ring->txq);
if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
- netif_start_queue(netdev);
+ netif_tx_start_queue(tx_ring->txq);
} else {
- adapter->stats.xmit_off++;
+ tx_ring->tx_stats.xmit_off++;
return NETDEV_TX_BUSY;
}
}
@@ -471,14 +747,29 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tx_ring->producer = get_next_index(producer, num_txd);
smp_mb();
- if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
- goto unwind_buff;
+ protocol = ntohs(skb->protocol);
+ if (protocol == ETH_P_IP)
+ l4_is_udp = ip_hdr(skb)->protocol == IPPROTO_UDP;
+ else if (protocol == ETH_P_IPV6)
+ l4_is_udp = ipv6_hdr(skb)->nexthdr == IPPROTO_UDP;
+
+ /* Check if it is a VXLAN packet */
+ if (!skb->encapsulation || !l4_is_udp ||
+ !qlcnic_encap_tx_offload(adapter)) {
+ if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb,
+ tx_ring)))
+ goto unwind_buff;
+ } else {
+ if (unlikely(qlcnic_tx_encap_pkt(adapter, first_desc,
+ skb, tx_ring)))
+ goto unwind_buff;
+ }
- if (adapter->mac_learn)
- qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
+ if (adapter->drv_mac_learn)
+ qlcnic_send_filter(adapter, first_desc, skb);
- adapter->stats.txbytes += skb->len;
- adapter->stats.xmitcalled++;
+ tx_ring->tx_stats.tx_bytes += skb->len;
+ tx_ring->tx_stats.xmit_called++;
qlcnic_update_cmd_producer(tx_ring);
@@ -499,17 +790,20 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
if (adapter->ahw->linkup && !linkup) {
netdev_info(netdev, "NIC Link is down\n");
adapter->ahw->linkup = 0;
- if (netif_running(netdev)) {
- netif_carrier_off(netdev);
- netif_stop_queue(netdev);
- }
+ netif_carrier_off(netdev);
} else if (!adapter->ahw->linkup && linkup) {
- netdev_info(netdev, "NIC Link is up\n");
adapter->ahw->linkup = 1;
- if (netif_running(netdev)) {
- netif_carrier_on(netdev);
- netif_wake_queue(netdev);
+
+ /* Do not advertise Link up to the stack if device
+ * is in loopback mode
+ */
+ if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode) {
+ netdev_info(netdev, "NIC Link is up for loopback test\n");
+ return;
}
+
+ netdev_info(netdev, "NIC Link is up\n");
+ netif_carrier_on(netdev);
}
}
@@ -528,8 +822,8 @@ static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
}
skb_reserve(skb, NET_IP_ALIGN);
- dma = pci_map_single(pdev, skb->data, rds_ring->dma_size,
- PCI_DMA_FROMDEVICE);
+ dma = pci_map_single(pdev, skb->data,
+ rds_ring->dma_size, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(pdev, dma)) {
adapter->stats.rx_dma_map_error++;
@@ -544,12 +838,13 @@ static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
}
static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
- struct qlcnic_host_rds_ring *rds_ring)
+ struct qlcnic_host_rds_ring *rds_ring,
+ u8 ring_id)
{
struct rcv_desc *pdesc;
struct qlcnic_rx_buffer *buffer;
int count = 0;
- uint32_t producer;
+ uint32_t producer, handle;
struct list_head *head;
if (!spin_trylock(&rds_ring->lock))
@@ -557,7 +852,6 @@ static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
producer = rds_ring->producer;
head = &rds_ring->free_list;
-
while (!list_empty(head)) {
buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
@@ -565,28 +859,29 @@ static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
break;
}
-
count++;
list_del(&buffer->list);
/* make a rcv descriptor */
pdesc = &rds_ring->desc_head[producer];
- pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+ handle = qlcnic_get_ref_handle(adapter,
+ buffer->ref_handle, ring_id);
+ pdesc->reference_handle = cpu_to_le16(handle);
pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
pdesc->addr_buffer = cpu_to_le64(buffer->dma);
producer = get_next_index(producer, rds_ring->num_desc);
}
-
if (count) {
rds_ring->producer = producer;
writel((producer - 1) & (rds_ring->num_desc - 1),
rds_ring->crb_rcv_producer);
}
-
spin_unlock(&rds_ring->lock);
}
-static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
+static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring,
+ int budget)
{
u32 sw_consumer, hw_consumer;
int i, done, count = 0;
@@ -594,9 +889,8 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
struct net_device *netdev = adapter->netdev;
struct qlcnic_skb_frag *frag;
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
- if (!spin_trylock(&adapter->tx_clean_lock))
+ if (!spin_trylock(&tx_ring->tx_clean_lock))
return 1;
sw_consumer = tx_ring->sw_consumer;
@@ -615,26 +909,25 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
PCI_DMA_TODEVICE);
frag->dma = 0ULL;
}
-
- adapter->stats.xmitfinished++;
+ tx_ring->tx_stats.xmit_finished++;
dev_kfree_skb_any(buffer->skb);
buffer->skb = NULL;
}
sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
- if (++count >= MAX_STATUS_HANDLE)
+ if (++count >= budget)
break;
}
- if (count && netif_running(netdev)) {
- tx_ring->sw_consumer = sw_consumer;
+ tx_ring->sw_consumer = sw_consumer;
+ if (count && netif_running(netdev)) {
smp_mb();
-
- if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
+ if (netif_tx_queue_stopped(tx_ring->txq) &&
+ netif_carrier_ok(netdev)) {
if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
- netif_wake_queue(netdev);
- adapter->stats.xmit_on++;
+ netif_tx_wake_queue(tx_ring->txq);
+ tx_ring->tx_stats.xmit_on++;
}
}
adapter->tx_timeo_cnt = 0;
@@ -655,27 +948,50 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
done = (sw_consumer == hw_consumer);
- spin_unlock(&adapter->tx_clean_lock);
+ spin_unlock(&tx_ring->tx_clean_lock);
return done;
}
static int qlcnic_poll(struct napi_struct *napi, int budget)
{
+ int tx_complete, work_done;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_adapter *adapter;
- int tx_complete, work_done;
+ struct qlcnic_host_tx_ring *tx_ring;
sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
adapter = sds_ring->adapter;
+ tx_ring = sds_ring->tx_ring;
- tx_complete = qlcnic_process_cmd_ring(adapter);
+ tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
+ budget);
work_done = qlcnic_process_rcv_ring(sds_ring, budget);
-
if ((work_done < budget) && tx_complete) {
napi_complete(&sds_ring->napi);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ qlcnic_enable_tx_intr(adapter, tx_ring);
+ }
+ }
+
+ return work_done;
+}
+
+static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_adapter *adapter;
+ int work_done;
+
+ tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
+ adapter = tx_ring->adapter;
+
+ work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+ if (work_done) {
+ napi_complete(&tx_ring->napi);
if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
- qlcnic_enable_int(sds_ring);
+ qlcnic_enable_tx_intr(adapter, tx_ring);
}
return work_done;
@@ -695,7 +1011,7 @@ static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete(&sds_ring->napi);
if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
- qlcnic_enable_int(sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
}
return work_done;
@@ -785,45 +1101,45 @@ static void qlcnic_handle_fw_message(int desc_cnt, int index,
break;
case 1:
dev_info(dev, "loopback already in progress\n");
- adapter->ahw->diag_cnt = -QLCNIC_TEST_IN_PROGRESS;
+ adapter->ahw->diag_cnt = -EINPROGRESS;
break;
case 2:
dev_info(dev, "loopback cable is not connected\n");
- adapter->ahw->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN;
+ adapter->ahw->diag_cnt = -ENODEV;
break;
default:
dev_info(dev,
"loopback configure request failed, err %x\n",
ret);
- adapter->ahw->diag_cnt = -QLCNIC_UNDEFINED_ERROR;
+ adapter->ahw->diag_cnt = -EIO;
break;
}
break;
+ case QLCNIC_C2H_OPCODE_GET_DCB_AEN:
+ qlcnic_dcb_aen_handler(adapter->dcb, (void *)&msg);
+ break;
default:
break;
}
}
-static struct sk_buff *
-qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
- struct qlcnic_host_rds_ring *rds_ring, u16 index,
- u16 cksum)
+static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *ring,
+ u16 index, u16 cksum)
{
struct qlcnic_rx_buffer *buffer;
struct sk_buff *skb;
- buffer = &rds_ring->rx_buf_arr[index];
-
+ buffer = &ring->rx_buf_arr[index];
if (unlikely(buffer->skb == NULL)) {
WARN_ON(1);
return NULL;
}
- pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
+ pci_unmap_single(adapter->pdev, buffer->dma, ring->dma_size,
PCI_DMA_FROMDEVICE);
skb = buffer->skb;
-
if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
(cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
adapter->stats.csummed++;
@@ -832,6 +1148,7 @@ qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
skb_checksum_none_assert(skb);
}
+
buffer->skb = NULL;
return skb;
@@ -847,10 +1164,10 @@ static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter,
memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
skb_pull(skb, VLAN_HLEN);
}
- if (!adapter->pvid)
+ if (!adapter->rx_pvid)
return 0;
- if (*vlan_tag == adapter->pvid) {
+ if (*vlan_tag == adapter->rx_pvid) {
/* Outer vlan tag. Packet should follow non-vlan path */
*vlan_tag = 0xffff;
return 0;
@@ -871,8 +1188,8 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
struct qlcnic_rx_buffer *buffer;
struct sk_buff *skb;
struct qlcnic_host_rds_ring *rds_ring;
- int index, length, cksum, pkt_offset;
- u16 vid = 0xffff;
+ int index, length, cksum, pkt_offset, is_lb_pkt;
+ u16 vid = 0xffff, t_vid;
if (unlikely(ring >= adapter->max_rds_rings))
return NULL;
@@ -892,6 +1209,12 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
if (!skb)
return buffer;
+ if (adapter->rx_mac_learn) {
+ t_vid = 0;
+ is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
+ }
+
if (length > rds_ring->skb_size)
skb_put(skb, rds_ring->skb_size);
else
@@ -909,7 +1232,7 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
skb->protocol = eth_type_trans(skb, netdev);
if (vid != 0xffff)
- __vlan_hwaccel_put_tag(skb, vid);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
napi_gro_receive(&sds_ring->napi, skb);
@@ -933,19 +1256,20 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
struct sk_buff *skb;
struct qlcnic_host_rds_ring *rds_ring;
struct iphdr *iph;
+ struct ipv6hdr *ipv6h;
struct tcphdr *th;
bool push, timestamp;
- int index, l2_hdr_offset, l4_hdr_offset;
- u16 lro_length, length, data_offset, vid = 0xffff;
+ int index, l2_hdr_offset, l4_hdr_offset, is_lb_pkt;
+ u16 lro_length, length, data_offset, t_vid, vid = 0xffff;
u32 seq_number;
- if (unlikely(ring > adapter->max_rds_rings))
+ if (unlikely(ring >= adapter->max_rds_rings))
return NULL;
rds_ring = &recv_ctx->rds_rings[ring];
index = qlcnic_get_lro_sts_refhandle(sts_data0);
- if (unlikely(index > rds_ring->num_desc))
+ if (unlikely(index >= rds_ring->num_desc))
return NULL;
buffer = &rds_ring->rx_buf_arr[index];
@@ -961,6 +1285,12 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
if (!skb)
return buffer;
+ if (adapter->rx_mac_learn) {
+ t_vid = 0;
+ is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
+ }
+
if (timestamp)
data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
else
@@ -976,21 +1306,34 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
}
skb->protocol = eth_type_trans(skb, netdev);
- iph = (struct iphdr *)skb->data;
- th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
- length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
- iph->tot_len = htons(length);
- iph->check = 0;
- iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+
+ if (ntohs(skb->protocol) == ETH_P_IPV6) {
+ ipv6h = (struct ipv6hdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
+ length = (th->doff << 2) + lro_length;
+ ipv6h->payload_len = htons(length);
+ } else {
+ iph = (struct iphdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
+ length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ csum_replace2(&iph->check, iph->tot_len, htons(length));
+ iph->tot_len = htons(length);
+ }
+
th->psh = push;
th->seq = htonl(seq_number);
length = skb->len;
- if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
+ if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
+ if (skb->protocol == htons(ETH_P_IPV6))
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ else
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ }
if (vid != 0xffff)
- __vlan_hwaccel_put_tag(skb, vid);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
netif_receive_skb(skb);
adapter->stats.lro_pkts++;
@@ -999,16 +1342,16 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
return buffer;
}
-int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
+static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
{
struct qlcnic_host_rds_ring *rds_ring;
struct qlcnic_adapter *adapter = sds_ring->adapter;
struct list_head *cur;
struct status_desc *desc;
struct qlcnic_rx_buffer *rxbuf;
+ int opcode, desc_cnt, count = 0;
u64 sts_data0, sts_data1;
- __le64 owner_phantom = cpu_to_le64(STATUS_OWNER_PHANTOM);
- int opcode, ring, desc_cnt, count = 0;
+ u8 ring;
u32 consumer = sds_ring->consumer;
while (count < max) {
@@ -1020,7 +1363,6 @@ int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
opcode = qlcnic_get_sts_opcode(sts_data0);
-
switch (opcode) {
case QLCNIC_RXPKT_DESC:
case QLCNIC_OLD_RXPKT_DESC:
@@ -1040,18 +1382,16 @@ int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
default:
goto skip;
}
-
WARN_ON(desc_cnt > 1);
if (likely(rxbuf))
list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
else
adapter->stats.null_rxbuf++;
-
skip:
for (; desc_cnt > 0; desc_cnt--) {
desc = &sds_ring->desc_head[consumer];
- desc->status_desc_data[0] = owner_phantom;
+ desc->status_desc_data[0] = QLCNIC_DESC_OWNER_FW;
consumer = get_next_index(consumer, sds_ring->num_desc);
}
count++;
@@ -1059,7 +1399,6 @@ skip:
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &adapter->recv_ctx->rds_rings[ring];
-
if (!list_empty(&sds_ring->free_list[ring])) {
list_for_each(cur, &sds_ring->free_list[ring]) {
rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
@@ -1072,7 +1411,7 @@ skip:
spin_unlock(&rds_ring->lock);
}
- qlcnic_post_rx_buffers_nodb(adapter, rds_ring);
+ qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
}
if (count) {
@@ -1084,12 +1423,12 @@ skip:
}
void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
- struct qlcnic_host_rds_ring *rds_ring)
+ struct qlcnic_host_rds_ring *rds_ring, u8 ring_id)
{
struct rcv_desc *pdesc;
struct qlcnic_rx_buffer *buffer;
int count = 0;
- u32 producer;
+ u32 producer, handle;
struct list_head *head;
producer = rds_ring->producer;
@@ -1110,7 +1449,9 @@ void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
/* make a rcv descriptor */
pdesc = &rds_ring->desc_head[producer];
pdesc->addr_buffer = cpu_to_le64(buffer->dma);
- pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+ handle = qlcnic_get_ref_handle(adapter, buffer->ref_handle,
+ ring_id);
+ pdesc->reference_handle = cpu_to_le16(handle);
pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
producer = get_next_index(producer, rds_ring->num_desc);
}
@@ -1180,7 +1521,7 @@ static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
return;
}
-void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
{
struct qlcnic_adapter *adapter = sds_ring->adapter;
struct status_desc *desc;
@@ -1217,93 +1558,651 @@ void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
writel(consumer, sds_ring->crb_sts_consumer);
}
-void qlcnic_fetch_mac(u32 off1, u32 off2, u8 alt_mac, u8 *mac)
+int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
{
- u32 mac_low, mac_high;
- int i;
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
- mac_low = off1;
- mac_high = off2;
+ if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
+ return -ENOMEM;
- if (alt_mac) {
- mac_low |= (mac_low >> 16) | (mac_high << 16);
- mac_high >>= 16;
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test) {
+ netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
+ NAPI_POLL_WEIGHT);
+ } else {
+ if (ring == (adapter->drv_sds_rings - 1))
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_poll,
+ NAPI_POLL_WEIGHT);
+ else
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_rx_poll,
+ NAPI_POLL_WEIGHT);
+ }
+ }
+
+ if (qlcnic_alloc_tx_rings(adapter, netdev)) {
+ qlcnic_free_sds_rings(recv_ctx);
+ return -ENOMEM;
}
- for (i = 0; i < 2; i++)
- mac[i] = (u8)(mac_high >> ((1 - i) * 8));
- for (i = 2; i < 6; i++)
- mac[i] = (u8)(mac_low >> ((5 - i) * 8));
+ if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netif_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll,
+ NAPI_POLL_WEIGHT);
+ }
+ }
+
+ return 0;
}
-int qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
+void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
{
- int ring, max_sds_rings;
+ int ring;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
- if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
- return -ENOMEM;
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netif_napi_del(&sds_ring->napi);
+ }
+
+ qlcnic_free_sds_rings(adapter->recv_ctx);
- max_sds_rings = adapter->max_sds_rings;
+ if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netif_napi_del(&tx_ring->napi);
+ }
+ }
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
- sds_ring = &recv_ctx->sds_rings[ring];
+ qlcnic_free_tx_rings(adapter);
+}
- if (ring == max_sds_rings - 1)
- netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
- QLCNIC_NETDEV_WEIGHT / max_sds_rings);
- else
- netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
- QLCNIC_NETDEV_WEIGHT*2);
+void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ napi_enable(&sds_ring->napi);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
}
- return 0;
+ if (qlcnic_check_multi_tx(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !adapter->ahw->diag_test) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ napi_enable(&tx_ring->napi);
+ qlcnic_enable_tx_intr(adapter, tx_ring);
+ }
+ }
}
-void qlcnic_napi_del(struct qlcnic_adapter *adapter)
+void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
- netif_napi_del(&sds_ring->napi);
+ qlcnic_disable_sds_intr(adapter, sds_ring);
+ napi_synchronize(&sds_ring->napi);
+ napi_disable(&sds_ring->napi);
}
- qlcnic_free_sds_rings(adapter->recv_ctx);
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !adapter->ahw->diag_test &&
+ qlcnic_check_multi_tx(adapter)) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ qlcnic_disable_tx_intr(adapter, tx_ring);
+ napi_synchronize(&tx_ring->napi);
+ napi_disable(&tx_ring->napi);
+ }
+ }
+}
+
+#define QLC_83XX_NORMAL_LB_PKT (1ULL << 36)
+#define QLC_83XX_LRO_LB_PKT (1ULL << 46)
+
+static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data, int lro_pkt)
+{
+ if (lro_pkt)
+ return (sts_data & QLC_83XX_LRO_LB_PKT) ? 1 : 0;
+ else
+ return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 1 : 0;
+}
+
+#define QLCNIC_ENCAP_LENGTH_MASK 0x7f
+
+static inline u8 qlcnic_encap_length(u64 sts_data)
+{
+ return sts_data & QLCNIC_ENCAP_LENGTH_MASK;
+}
+
+static struct qlcnic_rx_buffer *
+qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring,
+ u8 ring, u64 sts_data[])
+{
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int index, length, cksum, is_lb_pkt;
+ u16 vid = 0xffff;
+ int err;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_83xx_hndl(sts_data[0]);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+ length = qlcnic_83xx_pktln(sts_data[0]);
+ cksum = qlcnic_83xx_csum_status(sts_data[1]);
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+ if (!skb)
+ return buffer;
+
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
+
+ err = qlcnic_check_rx_tagging(adapter, skb, &vid);
+
+ if (adapter->rx_mac_learn) {
+ is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
+ }
+
+ if (unlikely(err)) {
+ adapter->stats.rxdropped++;
+ dev_kfree_skb(skb);
+ return buffer;
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (qlcnic_encap_length(sts_data[1]) &&
+ skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ skb->encapsulation = 1;
+ adapter->stats.encap_rx_csummed++;
+ }
+
+ if (vid != 0xffff)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+
+ napi_gro_receive(&sds_ring->napi, skb);
+
+ adapter->stats.rx_pkts++;
+ adapter->stats.rxbytes += length;
+
+ return buffer;
+}
+
+static struct qlcnic_rx_buffer *
+qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
+ u8 ring, u64 sts_data[])
+{
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct iphdr *iph;
+ struct ipv6hdr *ipv6h;
+ struct tcphdr *th;
+ bool push;
+ int l2_hdr_offset, l4_hdr_offset;
+ int index, is_lb_pkt;
+ u16 lro_length, length, data_offset, gso_size;
+ u16 vid = 0xffff;
+ int err;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_83xx_hndl(sts_data[0]);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ lro_length = qlcnic_83xx_lro_pktln(sts_data[0]);
+ l2_hdr_offset = qlcnic_83xx_l2_hdr_off(sts_data[1]);
+ l4_hdr_offset = qlcnic_83xx_l4_hdr_off(sts_data[1]);
+ push = qlcnic_83xx_is_psh_bit(sts_data[1]);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+ if (!skb)
+ return buffer;
+
+ if (qlcnic_83xx_is_tstamp(sts_data[1]))
+ data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE;
+ else
+ data_offset = l4_hdr_offset + QLCNIC_TCP_HDR_SIZE;
+
+ skb_put(skb, lro_length + data_offset);
+ skb_pull(skb, l2_hdr_offset);
+
+ err = qlcnic_check_rx_tagging(adapter, skb, &vid);
+
+ if (adapter->rx_mac_learn) {
+ is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
+ }
+
+ if (unlikely(err)) {
+ adapter->stats.rxdropped++;
+ dev_kfree_skb(skb);
+ return buffer;
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+ if (ntohs(skb->protocol) == ETH_P_IPV6) {
+ ipv6h = (struct ipv6hdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
+
+ length = (th->doff << 2) + lro_length;
+ ipv6h->payload_len = htons(length);
+ } else {
+ iph = (struct iphdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
+ length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ csum_replace2(&iph->check, iph->tot_len, htons(length));
+ iph->tot_len = htons(length);
+ }
+
+ th->psh = push;
+ length = skb->len;
+
+ if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
+ gso_size = qlcnic_83xx_get_lro_sts_mss(sts_data[0]);
+ skb_shinfo(skb)->gso_size = gso_size;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ else
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ }
+
+ if (vid != 0xffff)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+
+ netif_receive_skb(skb);
+
+ adapter->stats.lro_pkts++;
+ adapter->stats.lrobytes += length;
+ return buffer;
+}
+
+static int qlcnic_83xx_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
+ int max)
+{
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ struct list_head *cur;
+ struct status_desc *desc;
+ struct qlcnic_rx_buffer *rxbuf = NULL;
+ u8 ring;
+ u64 sts_data[2];
+ int count = 0, opcode;
+ u32 consumer = sds_ring->consumer;
+
+ while (count < max) {
+ desc = &sds_ring->desc_head[consumer];
+ sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
+ opcode = qlcnic_83xx_opcode(sts_data[1]);
+ if (!opcode)
+ break;
+ sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
+ ring = QLCNIC_FETCH_RING_ID(sts_data[0]);
+
+ switch (opcode) {
+ case QLC_83XX_REG_DESC:
+ rxbuf = qlcnic_83xx_process_rcv(adapter, sds_ring,
+ ring, sts_data);
+ break;
+ case QLC_83XX_LRO_DESC:
+ rxbuf = qlcnic_83xx_process_lro(adapter, ring,
+ sts_data);
+ break;
+ default:
+ dev_info(&adapter->pdev->dev,
+ "Unknown opcode: 0x%x\n", opcode);
+ goto skip;
+ }
+
+ if (likely(rxbuf))
+ list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
+ else
+ adapter->stats.null_rxbuf++;
+skip:
+ desc = &sds_ring->desc_head[consumer];
+ /* Reset the descriptor */
+ desc->status_desc_data[1] = 0;
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+ count++;
+ }
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx->rds_rings[ring];
+ if (!list_empty(&sds_ring->free_list[ring])) {
+ list_for_each(cur, &sds_ring->free_list[ring]) {
+ rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
+ list);
+ qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
+ }
+ spin_lock(&rds_ring->lock);
+ list_splice_tail_init(&sds_ring->free_list[ring],
+ &rds_ring->free_list);
+ spin_unlock(&rds_ring->lock);
+ }
+ qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
+ }
+ if (count) {
+ sds_ring->consumer = consumer;
+ writel(consumer, sds_ring->crb_sts_consumer);
+ }
+ return count;
}
-void qlcnic_napi_enable(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
+{
+ int tx_complete;
+ int work_done;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+ adapter = sds_ring->adapter;
+ /* tx ring count = 1 */
+ tx_ring = adapter->tx_ring;
+
+ tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+ work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
+ if ((work_done < budget) && tx_complete) {
+ napi_complete(&sds_ring->napi);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ }
+
+ return work_done;
+}
+
+static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
+{
+ int tx_complete;
+ int work_done;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+ adapter = sds_ring->adapter;
+ /* tx ring count = 1 */
+ tx_ring = adapter->tx_ring;
+
+ tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+ work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
+ if ((work_done < budget) && tx_complete) {
+ napi_complete(&sds_ring->napi);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ }
+
+ return work_done;
+}
+
+static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
+{
+ int work_done;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_adapter *adapter;
+
+ budget = QLCNIC_TX_POLL_BUDGET;
+ tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
+ adapter = tx_ring->adapter;
+ work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+ if (work_done) {
+ napi_complete(&tx_ring->napi);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_enable_tx_intr(adapter, tx_ring);
+ }
+
+ return work_done;
+}
+
+static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget)
+{
+ int work_done;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_adapter *adapter;
+
+ sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+ adapter = sds_ring->adapter;
+ work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
+ if (work_done < budget) {
+ napi_complete(&sds_ring->napi);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ }
+
+ return work_done;
+}
+
+void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
napi_enable(&sds_ring->napi);
- qlcnic_enable_int(sds_ring);
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ }
+
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ napi_enable(&tx_ring->napi);
+ qlcnic_enable_tx_intr(adapter, tx_ring);
+ }
}
}
-void qlcnic_napi_disable(struct qlcnic_adapter *adapter)
+void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
- qlcnic_disable_int(sds_ring);
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ qlcnic_disable_sds_intr(adapter, sds_ring);
napi_synchronize(&sds_ring->napi);
napi_disable(&sds_ring->napi);
}
+
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ qlcnic_disable_tx_intr(adapter, tx_ring);
+ napi_synchronize(&tx_ring->napi);
+ napi_disable(&tx_ring->napi);
+ }
+ }
+}
+
+int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
+ return -ENOMEM;
+
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_83xx_rx_poll,
+ NAPI_POLL_WEIGHT);
+ else
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_83xx_msix_sriov_vf_poll,
+ NAPI_POLL_WEIGHT);
+
+ } else {
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_83xx_poll,
+ NAPI_POLL_WEIGHT);
+ }
+ }
+
+ if (qlcnic_alloc_tx_rings(adapter, netdev)) {
+ qlcnic_free_sds_rings(recv_ctx);
+ return -ENOMEM;
+ }
+
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netif_napi_add(netdev, &tx_ring->napi,
+ qlcnic_83xx_msix_tx_poll,
+ NAPI_POLL_WEIGHT);
+ }
+ }
+
+ return 0;
+}
+
+void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netif_napi_del(&sds_ring->napi);
+ }
+
+ qlcnic_free_sds_rings(adapter->recv_ctx);
+
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netif_napi_del(&tx_ring->napi);
+ }
+ }
+
+ qlcnic_free_tx_rings(adapter);
+}
+
+static void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter,
+ int ring, u64 sts_data[])
+{
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int index, length;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+ index = qlcnic_83xx_hndl(sts_data[0]);
+ if (unlikely(index >= rds_ring->num_desc))
+ return;
+
+ length = qlcnic_83xx_pktln(sts_data[0]);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+ if (!skb)
+ return;
+
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
+
+ if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
+ adapter->ahw->diag_cnt++;
+ else
+ dump_skb(skb, adapter);
+
+ dev_kfree_skb_any(skb);
+ return;
+}
+
+void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+{
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ struct status_desc *desc;
+ u64 sts_data[2];
+ int ring, opcode;
+ u32 consumer = sds_ring->consumer;
+
+ desc = &sds_ring->desc_head[consumer];
+ sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
+ sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
+ opcode = qlcnic_83xx_opcode(sts_data[1]);
+ if (!opcode)
+ return;
+
+ ring = QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data[0]));
+ qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
+ desc = &sds_ring->desc_head[consumer];
+ desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+ sds_ring->consumer = consumer;
+ writel(consumer, sds_ring->crb_sts_consumer);
}
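
The LRO completion paths changed above (qlcnic_process_lro() and the new qlcnic_83xx_process_lro()) now call csum_replace2() to patch iph->check in place when tot_len is rewritten, instead of zeroing the field and recomputing the whole IPv4 header checksum. The user-space sketch below shows the one's-complement arithmetic (RFC 1624) behind that kind of incremental update; the helper names and the sample header are made up for illustration and are not part of this commit.

/*
 * Illustrative sketch only: incremental 16-bit checksum update in the style
 * of csum_replace2().  Helper names and the sample header are invented.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint16_t fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Full one's-complement checksum over a buffer of 16-bit words. */
static uint16_t csum_full(const uint16_t *buf, size_t words)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < words; i++)
		sum += buf[i];
	return (uint16_t)~fold(sum);
}

/* RFC 1624 eqn. 3: patch an existing checksum when one 16-bit word changes. */
static uint16_t csum_replace(uint16_t check, uint16_t old_val, uint16_t new_val)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old_val;
	sum += new_val;
	return (uint16_t)~fold(sum);
}

int main(void)
{
	uint16_t hdr[10] = { 0x4500, 0x003c, 0x1c46, 0x4000, 0x4006,
			     0x0000, 0xac10, 0x0a63, 0xac10, 0x0a0c };
	uint16_t old_len = hdr[1], new_len = 0x05dc;
	uint32_t sum = 0;
	size_t i;

	hdr[5] = csum_full(hdr, 10);		/* checksum for tot_len 0x003c */
	hdr[5] = csum_replace(hdr[5], old_len, new_len);
	hdr[1] = new_len;			/* LRO-style length rewrite */

	for (i = 0; i < 10; i++)		/* receiver-style verification */
		sum += hdr[i];
	printf("verify: 0x%04x (0 means the patched checksum is valid)\n",
	       (uint16_t)~fold(sum));
	return 0;
}
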
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index d833f592789..4fc186713b6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1,24 +1,29 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
-#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include "qlcnic.h"
+#include "qlcnic_sriov.h"
+#include "qlcnic_hw.h"
#include <linux/swab.h>
#include <linux/dma-mapping.h>
+#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
-#include <linux/sysfs.h>
#include <linux/aer.h>
#include <linux/log2.h>
+#include <linux/pci.h>
+#ifdef CONFIG_QLCNIC_VXLAN
+#include <net/vxlan.h>
+#endif
MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
MODULE_LICENSE("GPL");
@@ -29,31 +34,27 @@ char qlcnic_driver_name[] = "qlcnic";
static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
"Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
-static struct workqueue_struct *qlcnic_wq;
static int qlcnic_mac_learn;
module_param(qlcnic_mac_learn, int, 0444);
-MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");
+MODULE_PARM_DESC(qlcnic_mac_learn,
+ "Mac Filter (0=learning is disabled, 1=Driver learning is enabled, 2=FDB learning is enabled)");
-static int qlcnic_use_msi = 1;
-MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled");
+int qlcnic_use_msi = 1;
+MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
module_param_named(use_msi, qlcnic_use_msi, int, 0444);
-static int qlcnic_use_msi_x = 1;
-MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled");
+int qlcnic_use_msi_x = 1;
+MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444);
-static int qlcnic_auto_fw_reset = 1;
-MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled");
+int qlcnic_auto_fw_reset = 1;
+MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644);
-static int qlcnic_load_fw_file;
-MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file");
+int qlcnic_load_fw_file;
+MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444);
-static int qlcnic_config_npars;
-module_param(qlcnic_config_npars, int, 0444);
-MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled");
-
static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void qlcnic_remove(struct pci_dev *pdev);
static int qlcnic_open(struct net_device *netdev);
@@ -61,47 +62,58 @@ static int qlcnic_close(struct net_device *netdev);
static void qlcnic_tx_timeout(struct net_device *netdev);
static void qlcnic_attach_work(struct work_struct *work);
static void qlcnic_fwinit_work(struct work_struct *work);
-static void qlcnic_fw_poll_work(struct work_struct *work);
-static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
- work_func_t func, int delay);
-static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev);
#endif
static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
-static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
static irqreturn_t qlcnic_intr(int irq, void *data);
static irqreturn_t qlcnic_msi_intr(int irq, void *data);
static irqreturn_t qlcnic_msix_intr(int irq, void *data);
+static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data);
static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
-static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long);
static int qlcnic_start_firmware(struct qlcnic_adapter *);
static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
-static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
- struct qlcnic_esw_func_cfg *);
-static int qlcnic_vlan_rx_add(struct net_device *, u16);
-static int qlcnic_vlan_rx_del(struct net_device *, u16);
+static int qlcnic_vlan_rx_add(struct net_device *, __be16, u16);
+static int qlcnic_vlan_rx_del(struct net_device *, __be16, u16);
+
+static int qlcnic_82xx_setup_intr(struct qlcnic_adapter *);
+static void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32);
+static irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *);
+static pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *);
+static int qlcnic_82xx_start_firmware(struct qlcnic_adapter *);
+static void qlcnic_82xx_io_resume(struct pci_dev *);
+static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
+static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *,
+ pci_channel_state_t);
+static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
-#define QLCNIC_IS_TSO_CAPABLE(adapter) \
- ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
+ if (adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE824X)
+ return ahw->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX;
+ else
+ return 1;
+}
/* PCI Device ID Table */
#define ENTRY(device) \
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
-#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
-
static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_QLE844X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE844X),
{0,}
};
@@ -120,26 +132,144 @@ static const u32 msi_tgt_status[8] = {
ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
};
+static const u32 qlcnic_reg_tbl[] = {
+ 0x1B20A8, /* PEG_HALT_STAT1 */
+ 0x1B20AC, /* PEG_HALT_STAT2 */
+ 0x1B20B0, /* FW_HEARTBEAT */
+ 0x1B2100, /* LOCK ID */
+ 0x1B2128, /* FW_CAPABILITIES */
+ 0x1B2138, /* drv active */
+ 0x1B2140, /* dev state */
+ 0x1B2144, /* drv state */
+ 0x1B2148, /* drv scratch */
+ 0x1B214C, /* dev partition info */
+ 0x1B2174, /* drv idc ver */
+ 0x1B2150, /* fw version major */
+ 0x1B2154, /* fw version minor */
+ 0x1B2158, /* fw version sub */
+ 0x1B219C, /* npar state */
+ 0x1B21FC, /* FW_IMG_VALID */
+ 0x1B2250, /* CMD_PEG_STATE */
+ 0x1B233C, /* RCV_PEG_STATE */
+ 0x1B23B4, /* ASIC TEMP */
+ 0x1B216C, /* FW api */
+ 0x1B2170, /* drv op mode */
+ 0x13C010, /* flash lock */
+ 0x13C014, /* flash unlock */
+};
+
static const struct qlcnic_board_info qlcnic_boards[] = {
- {0x1077, 0x8020, 0x1077, 0x203,
- "8200 Series Single Port 10GbE Converged Network Adapter"
- "(TCP/IP Networking)"},
- {0x1077, 0x8020, 0x1077, 0x207,
- "8200 Series Dual Port 10GbE Converged Network Adapter"
- "(TCP/IP Networking)"},
- {0x1077, 0x8020, 0x1077, 0x20b,
- "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter"},
- {0x1077, 0x8020, 0x1077, 0x20c,
- "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter"},
- {0x1077, 0x8020, 0x1077, 0x20f,
- "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
- {0x1077, 0x8020, 0x103c, 0x3733,
- "NC523SFP 10Gb 2-port Server Adapter"},
- {0x1077, 0x8020, 0x103c, 0x3346,
- "CN1000Q Dual Port Converged Network Adapter"},
- {0x1077, 0x8020, 0x1077, 0x210,
- "QME8242-k 10GbE Dual Port Mezzanine Card"},
- {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE844X,
+ 0x0,
+ 0x0,
+ "8400 series 10GbE Converged Network Adapter (TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x24e,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x243,
+ "8300 Series Single Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x24a,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x246,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x252,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x26e,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x260,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x266,
+ "8300 Series Single Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x269,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x271,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ 0x0, 0x0, "8300 Series 1/10GbE Controller" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x203,
+ "8200 Series Single Port 10GbE Converged Network Adapter"
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x207,
+ "8200 Series Dual Port 10GbE Converged Network Adapter"
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x20b,
+ "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x20c,
+ "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x20f,
+ "3200 Series Single Port 10Gb Intelligent Ethernet Adapter" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ 0x103c, 0x3733,
+ "NC523SFP 10Gb 2-port Server Adapter" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ 0x103c, 0x3346,
+ "CN1000Q Dual Port Converged Network Adapter" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x210,
+ "QME8242-k 10GbE Dual Port Mezzanine Card" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ 0x0, 0x0, "cLOM8214 1/10GbE Controller" },
};
#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
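
The rewritten qlcnic_boards[] table above keys each entry on PCI vendor/device plus subsystem vendor/device IDs and keeps the catch-all rows (subsystem 0x0/0x0) last, so an unrecognised subsystem still resolves to a generic name. The sketch below demonstrates that first-match-with-wildcard-fallback ordering using a few entries copied from the table (names shortened); the struct and lookup function are hypothetical and do not claim to match the driver's actual helper.

/*
 * Illustrative sketch (hypothetical names): first-match lookup over a board
 * table ordered from most to least specific, catch-all 0x0/0x0 rows last.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct board_info {
	uint16_t vendor, device;	 /* PCI vendor/device ID */
	uint16_t sub_vendor, sub_device; /* subsystem IDs, 0x0 = wildcard */
	const char *name;
};

static const struct board_info boards[] = {
	{ 0x1077, 0x8020, 0x1077, 0x203,  "8200 Series Single Port 10GbE CNA" },
	{ 0x1077, 0x8020, 0x1077, 0x20b,  "3200 Series Dual Port 10Gb Adapter" },
	{ 0x1077, 0x8020, 0x103c, 0x3733, "NC523SFP 10Gb 2-port Server Adapter" },
	{ 0x1077, 0x8020, 0x0,    0x0,    "cLOM8214 1/10GbE Controller" },
};

static const char *board_name(uint16_t ven, uint16_t dev,
			      uint16_t sub_ven, uint16_t sub_dev)
{
	size_t i;

	for (i = 0; i < sizeof(boards) / sizeof(boards[0]); i++) {
		const struct board_info *b = &boards[i];

		if (b->vendor != ven || b->device != dev)
			continue;
		/* exact subsystem match, or fall back to the wildcard row */
		if ((b->sub_vendor == sub_ven && b->sub_device == sub_dev) ||
		    (b->sub_vendor == 0x0 && b->sub_device == 0x0))
			return b->name;
	}
	return "unknown board";
}

int main(void)
{
	printf("%s\n", board_name(0x1077, 0x8020, 0x1077, 0x203));
	printf("%s\n", board_name(0x1077, 0x8020, 0x103c, 0xffff));
	return 0;
}
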
@@ -164,74 +294,70 @@ void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
recv_ctx->sds_rings = NULL;
}
-static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
+int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
{
- memset(&adapter->stats, 0, sizeof(adapter->stats));
-}
-
-static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
-{
- u32 control;
- int pos;
-
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
- if (pos) {
- pci_read_config_dword(pdev, pos, &control);
- if (enable)
- control |= PCI_MSIX_FLAGS_ENABLE;
- else
- control = 0;
- pci_write_config_dword(pdev, pos, control);
- }
-}
-
-static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
-{
- int i;
-
- for (i = 0; i < count; i++)
- adapter->msix_entries[i].entry = i;
-}
-
-static int
-qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
-{
- u8 mac_addr[ETH_ALEN];
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
+ u8 mac_addr[ETH_ALEN];
+ int ret;
- if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
- return -EIO;
+ ret = qlcnic_get_mac_address(adapter, mac_addr,
+ adapter->ahw->pci_func);
+ if (ret)
+ return ret;
memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
- memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
/* set station address */
- if (!is_valid_ether_addr(netdev->perm_addr))
+ if (!is_valid_ether_addr(netdev->dev_addr))
dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
netdev->dev_addr);
return 0;
}
+static void qlcnic_delete_adapter_mac(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mac_vlan_list *cur;
+ struct list_head *head;
+
+ list_for_each(head, &adapter->mac_list) {
+ cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ if (ether_addr_equal_unaligned(adapter->mac_addr, cur->mac_addr)) {
+ qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
+ 0, QLCNIC_MAC_DEL);
+ list_del(&cur->list);
+ kfree(cur);
+ return;
+ }
+ }
+}
+
static int qlcnic_set_mac(struct net_device *netdev, void *p)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct sockaddr *addr = p;
+ if (qlcnic_sriov_vf_check(adapter))
+ return -EINVAL;
+
if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
return -EOPNOTSUPP;
if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
+ return -EINVAL;
+
+ if (ether_addr_equal_unaligned(adapter->mac_addr, addr->sa_data))
+ return 0;
if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
netif_device_detach(netdev);
qlcnic_napi_disable(adapter);
}
+ qlcnic_delete_adapter_mac(adapter);
memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
qlcnic_set_multi(adapter->netdev);
@@ -243,6 +369,135 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
return 0;
}
+static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *netdev, const unsigned char *addr)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err = -EOPNOTSUPP;
+
+ if (!adapter->fdb_mac_learn)
+ return ndo_dflt_fdb_del(ndm, tb, netdev, addr);
+
+ if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+ qlcnic_sriov_check(adapter)) {
+ if (is_unicast_ether_addr(addr)) {
+ err = dev_uc_del(netdev, addr);
+ if (!err)
+ err = qlcnic_nic_del_mac(adapter, addr);
+ } else if (is_multicast_ether_addr(addr)) {
+ err = dev_mc_del(netdev, addr);
+ } else {
+ err = -EINVAL;
+ }
+ }
+ return err;
+}
+
+static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *netdev,
+ const unsigned char *addr, u16 flags)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err = 0;
+
+ if (!adapter->fdb_mac_learn)
+ return ndo_dflt_fdb_add(ndm, tb, netdev, addr, flags);
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) &&
+ !qlcnic_sriov_check(adapter)) {
+ pr_info("%s: FDB e-switch is not enabled\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ if (ether_addr_equal(addr, adapter->mac_addr))
+ return err;
+
+ if (is_unicast_ether_addr(addr)) {
+ if (netdev_uc_count(netdev) < adapter->ahw->max_uc_count)
+ err = dev_uc_add_excl(netdev, addr);
+ else
+ err = -ENOMEM;
+ } else if (is_multicast_ether_addr(addr)) {
+ err = dev_mc_add_excl(netdev, addr);
+ } else {
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb,
+ struct net_device *netdev, int idx)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (!adapter->fdb_mac_learn)
+ return ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
+
+ if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+ qlcnic_sriov_check(adapter))
+ idx = ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
+
+ return idx;
+}
+
+static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter)
+{
+ while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ usleep_range(10000, 11000);
+
+ if (!adapter->fw_work.work.func)
+ return;
+
+ cancel_delayed_work_sync(&adapter->fw_work);
+}
+
+static int qlcnic_get_phys_port_id(struct net_device *netdev,
+ struct netdev_phys_port_id *ppid)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (!(adapter->flags & QLCNIC_HAS_PHYS_PORT_ID))
+ return -EOPNOTSUPP;
+
+ ppid->id_len = sizeof(ahw->phys_port_id);
+ memcpy(ppid->id, ahw->phys_port_id, ppid->id_len);
+
+ return 0;
+}
+
+#ifdef CONFIG_QLCNIC_VXLAN
+static void qlcnic_add_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ /* Adapter supports only one VXLAN port. Use very first port
+ * for enabling offload
+ */
+ if (!qlcnic_encap_rx_offload(adapter) || ahw->vxlan_port)
+ return;
+
+ ahw->vxlan_port = ntohs(port);
+ adapter->flags |= QLCNIC_ADD_VXLAN_PORT;
+}
+
+static void qlcnic_del_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port ||
+ (ahw->vxlan_port != ntohs(port)))
+ return;
+
+ adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
+}
+#endif
+
static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_open = qlcnic_open,
.ndo_stop = qlcnic_close,
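
qlcnic_add_vxlan_port() and qlcnic_del_vxlan_port() above do not program the hardware directly: they record the single supported VXLAN port in ahw->vxlan_port and raise the QLCNIC_ADD_VXLAN_PORT / QLCNIC_DEL_VXLAN_PORT flags for later processing. The sketch below shows that record-and-defer pattern with hypothetical names; the apply() step is an assumed consumer, since the code that acts on those flags is not part of this hunk.

/*
 * Illustrative sketch (hypothetical names): record one offload port and defer
 * the hardware update behind flags.  apply() is an assumed consumer and does
 * not claim to mirror the driver's actual flag handling.
 */
#include <stdio.h>
#include <stdint.h>

#define FLAG_ADD_PORT	0x1
#define FLAG_DEL_PORT	0x2

struct offload_state {
	uint16_t port;		/* 0 means no port recorded */
	unsigned int flags;
};

static void port_add(struct offload_state *st, uint16_t port)
{
	if (st->port)		/* only one port supported: first wins */
		return;
	st->port = port;
	st->flags |= FLAG_ADD_PORT;
}

static void port_del(struct offload_state *st, uint16_t port)
{
	if (!st->port || st->port != port)
		return;
	st->flags |= FLAG_DEL_PORT;
}

static void apply(struct offload_state *st)
{
	if (st->flags & FLAG_DEL_PORT) {
		printf("disable offload for port %u\n", st->port);
		st->port = 0;
	} else if (st->flags & FLAG_ADD_PORT) {
		printf("enable offload for port %u\n", st->port);
	}
	st->flags = 0;
}

int main(void)
{
	struct offload_state st = { 0, 0 };

	port_add(&st, 4789);	/* IANA VXLAN port, used only as an example */
	port_add(&st, 8472);	/* ignored: a port is already recorded */
	apply(&st);

	port_del(&st, 4789);
	apply(&st);
	return 0;
}
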
@@ -257,9 +512,24 @@ static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_tx_timeout = qlcnic_tx_timeout,
.ndo_vlan_rx_add_vid = qlcnic_vlan_rx_add,
.ndo_vlan_rx_kill_vid = qlcnic_vlan_rx_del,
+ .ndo_fdb_add = qlcnic_fdb_add,
+ .ndo_fdb_del = qlcnic_fdb_del,
+ .ndo_fdb_dump = qlcnic_fdb_dump,
+ .ndo_get_phys_port_id = qlcnic_get_phys_port_id,
+#ifdef CONFIG_QLCNIC_VXLAN
+ .ndo_add_vxlan_port = qlcnic_add_vxlan_port,
+ .ndo_del_vxlan_port = qlcnic_del_vxlan_port,
+#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = qlcnic_poll_controller,
#endif
+#ifdef CONFIG_QLCNIC_SRIOV
+ .ndo_set_vf_mac = qlcnic_sriov_set_vf_mac,
+ .ndo_set_vf_rate = qlcnic_sriov_set_vf_tx_rate,
+ .ndo_get_vf_config = qlcnic_sriov_get_vf_config,
+ .ndo_set_vf_vlan = qlcnic_sriov_set_vf_vlan,
+ .ndo_set_vf_spoofchk = qlcnic_sriov_set_vf_spoofchk,
+#endif
};
static const struct net_device_ops qlcnic_netdev_failed_ops = {
@@ -267,50 +537,290 @@ static const struct net_device_ops qlcnic_netdev_failed_ops = {
};
static struct qlcnic_nic_template qlcnic_ops = {
- .config_bridged_mode = qlcnic_config_bridged_mode,
- .config_led = qlcnic_config_led,
- .start_firmware = qlcnic_start_firmware
+ .config_bridged_mode = qlcnic_config_bridged_mode,
+ .config_led = qlcnic_82xx_config_led,
+ .start_firmware = qlcnic_82xx_start_firmware,
+ .request_reset = qlcnic_82xx_dev_request_reset,
+ .cancel_idc_work = qlcnic_82xx_cancel_idc_work,
+ .napi_add = qlcnic_82xx_napi_add,
+ .napi_del = qlcnic_82xx_napi_del,
+ .config_ipaddr = qlcnic_82xx_config_ipaddr,
+ .shutdown = qlcnic_82xx_shutdown,
+ .resume = qlcnic_82xx_resume,
+ .clear_legacy_intr = qlcnic_82xx_clear_legacy_intr,
+};
+
+struct qlcnic_nic_template qlcnic_vf_ops = {
+ .config_bridged_mode = qlcnicvf_config_bridged_mode,
+ .config_led = qlcnicvf_config_led,
+ .start_firmware = qlcnicvf_start_firmware
};
-static struct qlcnic_nic_template qlcnic_vf_ops = {
- .config_bridged_mode = qlcnicvf_config_bridged_mode,
- .config_led = qlcnicvf_config_led,
- .start_firmware = qlcnicvf_start_firmware
+static struct qlcnic_hardware_ops qlcnic_hw_ops = {
+ .read_crb = qlcnic_82xx_read_crb,
+ .write_crb = qlcnic_82xx_write_crb,
+ .read_reg = qlcnic_82xx_hw_read_wx_2M,
+ .write_reg = qlcnic_82xx_hw_write_wx_2M,
+ .get_mac_address = qlcnic_82xx_get_mac_address,
+ .setup_intr = qlcnic_82xx_setup_intr,
+ .alloc_mbx_args = qlcnic_82xx_alloc_mbx_args,
+ .mbx_cmd = qlcnic_82xx_issue_cmd,
+ .get_func_no = qlcnic_82xx_get_func_no,
+ .api_lock = qlcnic_82xx_api_lock,
+ .api_unlock = qlcnic_82xx_api_unlock,
+ .add_sysfs = qlcnic_82xx_add_sysfs,
+ .remove_sysfs = qlcnic_82xx_remove_sysfs,
+ .process_lb_rcv_ring_diag = qlcnic_82xx_process_rcv_ring_diag,
+ .create_rx_ctx = qlcnic_82xx_fw_cmd_create_rx_ctx,
+ .create_tx_ctx = qlcnic_82xx_fw_cmd_create_tx_ctx,
+ .del_rx_ctx = qlcnic_82xx_fw_cmd_del_rx_ctx,
+ .del_tx_ctx = qlcnic_82xx_fw_cmd_del_tx_ctx,
+ .setup_link_event = qlcnic_82xx_linkevent_request,
+ .get_nic_info = qlcnic_82xx_get_nic_info,
+ .get_pci_info = qlcnic_82xx_get_pci_info,
+ .set_nic_info = qlcnic_82xx_set_nic_info,
+ .change_macvlan = qlcnic_82xx_sre_macaddr_change,
+ .napi_enable = qlcnic_82xx_napi_enable,
+ .napi_disable = qlcnic_82xx_napi_disable,
+ .config_intr_coal = qlcnic_82xx_config_intr_coalesce,
+ .config_rss = qlcnic_82xx_config_rss,
+ .config_hw_lro = qlcnic_82xx_config_hw_lro,
+ .config_loopback = qlcnic_82xx_set_lb_mode,
+ .clear_loopback = qlcnic_82xx_clear_lb_mode,
+ .config_promisc_mode = qlcnic_82xx_nic_set_promisc,
+ .change_l2_filter = qlcnic_82xx_change_filter,
+ .get_board_info = qlcnic_82xx_get_board_info,
+ .set_mac_filter_count = qlcnic_82xx_set_mac_filter_count,
+ .free_mac_list = qlcnic_82xx_free_mac_list,
+ .read_phys_port_id = qlcnic_82xx_read_phys_port_id,
+ .io_error_detected = qlcnic_82xx_io_error_detected,
+ .io_slot_reset = qlcnic_82xx_io_slot_reset,
+ .io_resume = qlcnic_82xx_io_resume,
+ .get_beacon_state = qlcnic_82xx_get_beacon_state,
+ .enable_sds_intr = qlcnic_82xx_enable_sds_intr,
+ .disable_sds_intr = qlcnic_82xx_disable_sds_intr,
+ .enable_tx_intr = qlcnic_82xx_enable_tx_intr,
+ .disable_tx_intr = qlcnic_82xx_disable_tx_intr,
+ .get_saved_state = qlcnic_82xx_get_saved_state,
+ .set_saved_state = qlcnic_82xx_set_saved_state,
+ .cache_tmpl_hdr_values = qlcnic_82xx_cache_tmpl_hdr_values,
+ .get_cap_size = qlcnic_82xx_get_cap_size,
+ .set_sys_info = qlcnic_82xx_set_sys_info,
+ .store_cap_mask = qlcnic_82xx_store_cap_mask,
};
-static int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
+static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (qlcnic_82xx_check(adapter) &&
+ (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_MULTI_TX)) {
+ test_and_set_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+static int qlcnic_max_rings(struct qlcnic_adapter *adapter, u8 ring_cnt,
+ int queue_type)
+{
+ int num_rings, max_rings = QLCNIC_MAX_SDS_RINGS;
+
+ if (queue_type == QLCNIC_RX_QUEUE)
+ max_rings = adapter->max_sds_rings;
+ else if (queue_type == QLCNIC_TX_QUEUE)
+ max_rings = adapter->max_tx_rings;
+
+ num_rings = rounddown_pow_of_two(min_t(int, num_online_cpus(),
+ max_rings));
+
+ if (ring_cnt > num_rings)
+ return num_rings;
+ else
+ return ring_cnt;
+}
+
+void qlcnic_set_tx_ring_count(struct qlcnic_adapter *adapter, u8 tx_cnt)
+{
+ /* 83xx adapter does not have max_tx_rings initialized in probe */
+ if (adapter->max_tx_rings)
+ adapter->drv_tx_rings = qlcnic_max_rings(adapter, tx_cnt,
+ QLCNIC_TX_QUEUE);
+ else
+ adapter->drv_tx_rings = tx_cnt;
+}
+
+void qlcnic_set_sds_ring_count(struct qlcnic_adapter *adapter, u8 rx_cnt)
+{
+ /* 83xx adapter does not have max_sds_rings initialized in probe */
+ if (adapter->max_sds_rings)
+ adapter->drv_sds_rings = qlcnic_max_rings(adapter, rx_cnt,
+ QLCNIC_RX_QUEUE);
+ else
+ adapter->drv_sds_rings = rx_cnt;
+}
+
+int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int num_msix = 0, err = 0, vector;
+
+ adapter->flags &= ~QLCNIC_TSS_RSS;
+
+ if (adapter->drv_tss_rings > 0)
+ num_msix += adapter->drv_tss_rings;
+ else
+ num_msix += adapter->drv_tx_rings;
+
+ if (adapter->drv_rss_rings > 0)
+ num_msix += adapter->drv_rss_rings;
+ else
+ num_msix += adapter->drv_sds_rings;
+
+ if (qlcnic_83xx_check(adapter))
+ num_msix += 1;
+
+ if (!adapter->msix_entries) {
+ adapter->msix_entries = kcalloc(num_msix,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!adapter->msix_entries)
+ return -ENOMEM;
+ }
+
+ for (vector = 0; vector < num_msix; vector++)
+ adapter->msix_entries[vector].entry = vector;
+
+restore:
+ err = pci_enable_msix_exact(pdev, adapter->msix_entries, num_msix);
+ if (err == -ENOSPC) {
+ if (!adapter->drv_tss_rings && !adapter->drv_rss_rings)
+ return err;
+
+ netdev_info(adapter->netdev,
+ "Unable to allocate %d MSI-X vectors, Available vectors %d\n",
+ num_msix, err);
+
+ num_msix = adapter->drv_tx_rings + adapter->drv_sds_rings;
+
+ /* Set rings to 0 so we can restore original TSS/RSS count */
+ adapter->drv_tss_rings = 0;
+ adapter->drv_rss_rings = 0;
+
+ if (qlcnic_83xx_check(adapter))
+ num_msix += 1;
+
+ netdev_info(adapter->netdev,
+ "Restoring %d Tx, %d SDS rings for total %d vectors.\n",
+ adapter->drv_tx_rings, adapter->drv_sds_rings,
+ num_msix);
+
+ goto restore;
+ } else if (err < 0) {
+ return err;
+ }
+
+ adapter->ahw->num_msix = num_msix;
+ if (adapter->drv_tss_rings > 0)
+ adapter->drv_tx_rings = adapter->drv_tss_rings;
+
+ if (adapter->drv_rss_rings > 0)
+ adapter->drv_sds_rings = adapter->drv_rss_rings;
+
+ return 0;
+}
+
+int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
{
struct pci_dev *pdev = adapter->pdev;
- int err = -1;
+ int err, vector;
+
+ if (!adapter->msix_entries) {
+ adapter->msix_entries = kcalloc(num_msix,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!adapter->msix_entries)
+ return -ENOMEM;
+ }
- adapter->max_sds_rings = 1;
adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
- qlcnic_set_msix_bit(pdev, 0);
if (adapter->ahw->msix_supported) {
- enable_msix:
- qlcnic_init_msix_entries(adapter, num_msix);
- err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
- if (err == 0) {
- adapter->flags |= QLCNIC_MSIX_ENABLED;
- qlcnic_set_msix_bit(pdev, 1);
+enable_msix:
+ for (vector = 0; vector < num_msix; vector++)
+ adapter->msix_entries[vector].entry = vector;
- adapter->max_sds_rings = num_msix;
+ err = pci_enable_msix_range(pdev,
+ adapter->msix_entries, 1, num_msix);
+ if (err == num_msix) {
+ adapter->flags |= QLCNIC_MSIX_ENABLED;
+ adapter->ahw->num_msix = num_msix;
dev_info(&pdev->dev, "using msi-x interrupts\n");
- return err;
- }
- if (err > 0) {
- num_msix = rounddown_pow_of_two(err);
- if (num_msix)
+ return 0;
+ } else if (err > 0) {
+ pci_disable_msix(pdev);
+
+ dev_info(&pdev->dev,
+ "Unable to allocate %d MSI-X vectors, Available vectors %d\n",
+ num_msix, err);
+
+ if (qlcnic_82xx_check(adapter)) {
+ num_msix = rounddown_pow_of_two(err);
+ if (err < QLCNIC_82XX_MINIMUM_VECTOR)
+ return -ENOSPC;
+ } else {
+ num_msix = rounddown_pow_of_two(err - 1);
+ num_msix += 1;
+ if (err < QLCNIC_83XX_MINIMUM_VECTOR)
+ return -ENOSPC;
+ }
+
+ if (qlcnic_82xx_check(adapter) &&
+ !qlcnic_check_multi_tx(adapter)) {
+ adapter->drv_sds_rings = num_msix;
+ adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
+ } else {
+ /* Distribute vectors equally */
+ adapter->drv_tx_rings = num_msix / 2;
+ adapter->drv_sds_rings = adapter->drv_tx_rings;
+ }
+
+ if (num_msix) {
+ dev_info(&pdev->dev,
+ "Trying to allocate %d MSI-X interrupt vectors\n",
+ num_msix);
goto enable_msix;
+ }
+ } else {
+ dev_info(&pdev->dev,
+ "Unable to allocate %d MSI-X vectors, err=%d\n",
+ num_msix, err);
+ return err;
}
}
- return err;
+
+ return -EIO;
+}
+
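
When the PCI core can only grant part of the request, the retry count is derived by rounding the available vectors down to a power of two, keeping one vector aside on 83xx before rounding. A small sketch of that shrink-and-retry arithmetic; the minimum-vector thresholds here are assumptions, not the driver's QLCNIC_*_MINIMUM_VECTOR values:

    #include <stdio.h>

    /* Minimum-vector thresholds are assumptions, not the driver's macros. */
    #define MIN_VEC_82XX 2
    #define MIN_VEC_83XX 3

    /* 'avail' is the vector count reported back by the PCI core. */
    static int shrink_msix(int avail, int is_83xx)
    {
            int num_msix = 1;

            if (is_83xx) {
                    if (avail < MIN_VEC_83XX)
                            return -1;      /* driver returns -ENOSPC */
                    while (num_msix * 2 <= avail - 1)
                            num_msix *= 2;  /* rounddown_pow_of_two(avail - 1) */
                    num_msix += 1;          /* add back the reserved vector */
            } else {
                    if (avail < MIN_VEC_82XX)
                            return -1;
                    while (num_msix * 2 <= avail)
                            num_msix *= 2;  /* rounddown_pow_of_two(avail) */
            }
            return num_msix;
    }

    int main(void)
    {
            printf("82xx, 7 available -> retry with %d\n", shrink_msix(7, 0));
            printf("83xx, 6 available -> retry with %d\n", shrink_msix(6, 1));
            return 0;
    }
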
+static int qlcnic_82xx_calculate_msix_vector(struct qlcnic_adapter *adapter)
+{
+ int num_msix;
+
+ num_msix = adapter->drv_sds_rings;
+
+ if (qlcnic_check_multi_tx(adapter))
+ num_msix += adapter->drv_tx_rings;
+ else
+ num_msix += QLCNIC_SINGLE_RING;
+
+ return num_msix;
}
-static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
+static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
{
+ int err = 0;
u32 offset, mask_reg;
const struct qlcnic_legacy_intr_set *legacy_intrp;
struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -323,9 +833,12 @@ static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
offset);
dev_info(&pdev->dev, "using msi interrupts\n");
adapter->msix_entries[0].vector = pdev->irq;
- return;
+ return err;
}
+ if (qlcnic_use_msi || qlcnic_use_msi_x)
+ return -EOPNOTSUPP;
+
legacy_intrp = &legacy_intr[adapter->ahw->pci_func];
adapter->ahw->int_vec_bit = legacy_intrp->int_vec_bit;
offset = legacy_intrp->tgt_status_reg;
@@ -336,49 +849,146 @@ static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
adapter->crb_int_state_reg = qlcnic_get_ioaddr(ahw, ISR_INT_STATE_REG);
dev_info(&pdev->dev, "using legacy interrupts\n");
adapter->msix_entries[0].vector = pdev->irq;
+ return err;
}
-static void
-qlcnic_setup_intr(struct qlcnic_adapter *adapter)
+static int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter)
{
- int num_msix;
+ int num_msix, err = 0;
- if (adapter->ahw->msix_supported) {
- num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
- QLCNIC_DEF_NUM_STS_DESC_RINGS));
- } else
- num_msix = 1;
+ if (adapter->flags & QLCNIC_TSS_RSS) {
+ err = qlcnic_setup_tss_rss_intr(adapter);
+ if (err < 0)
+ return err;
+ num_msix = adapter->ahw->num_msix;
+ } else {
+ num_msix = qlcnic_82xx_calculate_msix_vector(adapter);
- if (!qlcnic_enable_msix(adapter, num_msix))
- return;
+ err = qlcnic_enable_msix(adapter, num_msix);
+ if (err == -ENOMEM)
+ return err;
+
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ qlcnic_disable_multi_tx(adapter);
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
- qlcnic_enable_msi_legacy(adapter);
+ err = qlcnic_enable_msi_legacy(adapter);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
}
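
The 82xx bring-up therefore tries MSI-X first and only degrades to MSI or legacy INTx after disabling multi-Tx and dropping to a single SDS ring. A compressed sketch of that ordering, with stand-in names rather than the driver's functions:

    #include <stdio.h>

    enum irq_mode { IRQ_MSIX, IRQ_MSI, IRQ_LEGACY };

    struct rings { int tx; int sds; };

    /* msix_ok / msi_ok stand in for the results of the PCI enable calls. */
    static enum irq_mode bring_up_irqs(int msix_ok, int msi_ok, struct rings *r)
    {
            if (msix_ok)
                    return IRQ_MSIX;        /* keep the multi-ring layout */

            r->tx = 1;                      /* disable multi-Tx */
            r->sds = 1;                     /* QLCNIC_SINGLE_RING */
            return msi_ok ? IRQ_MSI : IRQ_LEGACY;
    }

    int main(void)
    {
            struct rings r = { .tx = 4, .sds = 4 };

            printf("mode=%d tx=%d sds=%d\n",
                   bring_up_irqs(0, 1, &r), r.tx, r.sds);
            return 0;
    }
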
-static void
-qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
+int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *adapter, int op_type)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err, i;
+
+ if (qlcnic_check_multi_tx(adapter) &&
+ !ahw->diag_test &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ ahw->intr_tbl = vzalloc(ahw->num_msix *
+ sizeof(struct qlcnic_intrpt_config));
+ if (!ahw->intr_tbl)
+ return -ENOMEM;
+
+ for (i = 0; i < ahw->num_msix; i++) {
+ ahw->intr_tbl[i].type = QLCNIC_INTRPT_MSIX;
+ ahw->intr_tbl[i].id = i;
+ ahw->intr_tbl[i].src = 0;
+ }
+
+ err = qlcnic_82xx_config_intrpt(adapter, 1);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to configure Interrupt for %d vector\n",
+ ahw->num_msix);
+ return err;
+ }
+
+ return 0;
+}
+
+void qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
{
if (adapter->flags & QLCNIC_MSIX_ENABLED)
pci_disable_msix(adapter->pdev);
if (adapter->flags & QLCNIC_MSI_ENABLED)
pci_disable_msi(adapter->pdev);
+
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+
+ if (adapter->ahw->intr_tbl) {
+ vfree(adapter->ahw->intr_tbl);
+ adapter->ahw->intr_tbl = NULL;
+ }
}
-static void
-qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
+static void qlcnic_cleanup_pci_map(struct qlcnic_hardware_context *ahw)
{
- if (adapter->ahw->pci_base0 != NULL)
- iounmap(adapter->ahw->pci_base0);
+ if (ahw->pci_base0 != NULL)
+ iounmap(ahw->pci_base0);
}
-static int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
+static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_pci_info *pci_info;
- int i, ret = 0, j = 0;
+ int ret;
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ switch (ahw->port_type) {
+ case QLCNIC_GBE:
+ ahw->total_nic_func = QLCNIC_NIU_MAX_GBE_PORTS;
+ break;
+ case QLCNIC_XGBE:
+ ahw->total_nic_func = QLCNIC_NIU_MAX_XG_PORTS;
+ break;
+ }
+ return 0;
+ }
+
+ if (ahw->op_mode == QLCNIC_MGMT_FUNC)
+ return 0;
+
+ pci_info = kcalloc(ahw->max_vnic_func, sizeof(*pci_info), GFP_KERNEL);
+ if (!pci_info)
+ return -ENOMEM;
+
+ ret = qlcnic_get_pci_info(adapter, pci_info);
+ kfree(pci_info);
+ return ret;
+}
+
+static bool qlcnic_port_eswitch_cfg_capability(struct qlcnic_adapter *adapter)
+{
+ bool ret = false;
+
+ if (qlcnic_84xx_check(adapter)) {
+ ret = true;
+ } else if (qlcnic_83xx_check(adapter)) {
+ if (adapter->ahw->extra_capability[0] &
+ QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG)
+ ret = true;
+ else
+ ret = false;
+ }
+
+ return ret;
+}
+
+int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_pci_info *pci_info;
+ int i, id = 0, ret = 0, j = 0;
u16 act_pci_func;
u8 pfn;
- pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+ pci_info = kcalloc(ahw->max_vnic_func, sizeof(*pci_info), GFP_KERNEL);
if (!pci_info)
return -ENOMEM;
@@ -386,7 +996,7 @@ static int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
if (ret)
goto err_pci_info;
- act_pci_func = adapter->ahw->act_pci_func;
+ act_pci_func = ahw->total_nic_func;
adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
act_pci_func, GFP_KERNEL);
@@ -402,11 +1012,13 @@ static int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
goto err_npars;
}
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+ for (i = 0; i < ahw->max_vnic_func; i++) {
pfn = pci_info[i].id;
- if (pfn >= QLCNIC_MAX_PCI_FUNC) {
+ if (pfn >= ahw->max_vnic_func) {
ret = QL_STATUS_INVALID_PARAM;
+ dev_err(&adapter->pdev->dev, "%s: Invalid function 0x%x, max 0x%x\n",
+ __func__, pfn, ahw->max_vnic_func);
goto err_eswitch;
}
@@ -414,17 +1026,34 @@ static int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
(pci_info[i].type != QLCNIC_TYPE_NIC))
continue;
+ if (qlcnic_port_eswitch_cfg_capability(adapter)) {
+ if (!qlcnic_83xx_set_port_eswitch_status(adapter, pfn,
+ &id))
+ adapter->npars[j].eswitch_status = true;
+ else
+ continue;
+ } else {
+ adapter->npars[j].eswitch_status = true;
+ }
+
adapter->npars[j].pci_func = pfn;
adapter->npars[j].active = (u8)pci_info[i].active;
adapter->npars[j].type = (u8)pci_info[i].type;
adapter->npars[j].phy_port = (u8)pci_info[i].default_port;
adapter->npars[j].min_bw = pci_info[i].tx_min_bw;
adapter->npars[j].max_bw = pci_info[i].tx_max_bw;
+
+ memcpy(&adapter->npars[j].mac, &pci_info[i].mac, ETH_ALEN);
j++;
}
- for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
- adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
+ /* Update eSwitch status for adapters without per port eSwitch
+ * configuration capability
+ */
+ if (!qlcnic_port_eswitch_cfg_capability(adapter)) {
+ for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
+ adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
+ }
kfree(pci_info);
return 0;
@@ -445,7 +1074,7 @@ static int
qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
{
u8 id;
- int i, ret = 1;
+ int ret;
u32 data = QLCNIC_MGMT_FUNC;
struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -453,49 +1082,30 @@ qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
if (ret)
goto err_lock;
- if (qlcnic_config_npars) {
- for (i = 0; i < ahw->act_pci_func; i++) {
- id = adapter->npars[i].pci_func;
- if (id == ahw->pci_func)
- continue;
- data |= (qlcnic_config_npars &
- QLC_DEV_SET_DRV(0xf, id));
- }
- } else {
- data = QLCRD32(adapter, QLCNIC_DRV_OP_MODE);
- data = (data & ~QLC_DEV_SET_DRV(0xf, ahw->pci_func)) |
- (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
- ahw->pci_func));
- }
- QLCWR32(adapter, QLCNIC_DRV_OP_MODE, data);
+ id = ahw->pci_func;
+ data = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE);
+ data = (data & ~QLC_DEV_SET_DRV(0xf, id)) |
+ QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC, id);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_DRV_OP_MODE, data);
qlcnic_api_unlock(adapter);
err_lock:
return ret;
}
-static void
-qlcnic_check_vf(struct qlcnic_adapter *adapter)
+static void qlcnic_check_vf(struct qlcnic_adapter *adapter,
+ const struct pci_device_id *ent)
{
- void __iomem *msix_base_addr;
- void __iomem *priv_op;
- u32 func;
- u32 msix_base;
u32 op_mode, priv_level;
/* Determine FW API version */
- adapter->ahw->fw_hal_version = readl(adapter->ahw->pci_base0 +
- QLCNIC_FW_API);
+ adapter->ahw->fw_hal_version = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_FW_API);
/* Find PCI function number */
- pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
- msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE;
- msix_base = readl(msix_base_addr);
- func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
- adapter->ahw->pci_func = func;
+ qlcnic_get_func_no(adapter);
/* Determine function privilege level */
- priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
- op_mode = readl(priv_op);
+ op_mode = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE);
if (op_mode == QLC_DEV_DRV_DEFAULT)
priv_level = QLCNIC_MGMT_FUNC;
else
@@ -512,12 +1122,19 @@ qlcnic_check_vf(struct qlcnic_adapter *adapter)
}
#define QLCNIC_82XX_BAR0_LENGTH 0x00200000UL
+#define QLCNIC_83XX_BAR0_LENGTH 0x4000
static void qlcnic_get_bar_length(u32 dev_id, ulong *bar)
{
switch (dev_id) {
case PCI_DEVICE_ID_QLOGIC_QLE824X:
*bar = QLCNIC_82XX_BAR0_LENGTH;
break;
+ case PCI_DEVICE_ID_QLOGIC_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_QLE844X:
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
+ *bar = QLCNIC_83XX_BAR0_LENGTH;
+ break;
default:
*bar = 0;
}
@@ -546,7 +1163,8 @@ static int qlcnic_setup_pci_map(struct pci_dev *pdev,
return -EIO;
}
- dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
+ dev_info(&pdev->dev, "%dKB memory map\n", (int)(mem_len >> 10));
+
ahw->pci_base0 = mem_ptr0;
ahw->pci_len0 = pci_len0;
offset = QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(ahw->pci_func));
@@ -555,6 +1173,27 @@ static int qlcnic_setup_pci_map(struct pci_dev *pdev,
return 0;
}
+static bool qlcnic_validate_subsystem_id(struct qlcnic_adapter *adapter,
+ int index)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned short subsystem_vendor;
+ bool ret = true;
+
+ subsystem_vendor = pdev->subsystem_vendor;
+
+ if (pdev->device == PCI_DEVICE_ID_QLOGIC_QLE824X ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_QLE834X) {
+ if (qlcnic_boards[index].sub_vendor == subsystem_vendor &&
+ qlcnic_boards[index].sub_device == pdev->subsystem_device)
+ ret = true;
+ else
+ ret = false;
+ }
+
+ return ret;
+}
+
static void qlcnic_get_board_name(struct qlcnic_adapter *adapter, char *name)
{
struct pci_dev *pdev = adapter->pdev;
@@ -562,38 +1201,43 @@ static void qlcnic_get_board_name(struct qlcnic_adapter *adapter, char *name)
for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
if (qlcnic_boards[i].vendor == pdev->vendor &&
- qlcnic_boards[i].device == pdev->device &&
- qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
- qlcnic_boards[i].sub_device == pdev->subsystem_device) {
- sprintf(name, "%pM: %s" ,
- adapter->mac_addr,
- qlcnic_boards[i].short_name);
- found = 1;
- break;
+ qlcnic_boards[i].device == pdev->device &&
+ qlcnic_validate_subsystem_id(adapter, i)) {
+ found = 1;
+ break;
}
-
}
if (!found)
sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
+ else
+ sprintf(name, "%pM: %s" , adapter->mac_addr,
+ qlcnic_boards[i].short_name);
}
static void
qlcnic_check_options(struct qlcnic_adapter *adapter)
{
+ int err;
u32 fw_major, fw_minor, fw_build, prev_fw_version;
struct pci_dev *pdev = adapter->pdev;
- struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
prev_fw_version = adapter->fw_version;
- fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
- fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
- fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
+ fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
- if (adapter->ahw->op_mode != QLCNIC_NON_PRIV_FUNC) {
+ err = qlcnic_get_board_info(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Error getting board config info.\n");
+ return;
+ }
+ if (ahw->op_mode != QLCNIC_NON_PRIV_FUNC) {
if (fw_dump->tmpl_hdr == NULL ||
adapter->fw_version > prev_fw_version) {
if (fw_dump->tmpl_hdr)
@@ -604,8 +1248,9 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
}
}
- dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
- fw_major, fw_minor, fw_build);
+ dev_info(&pdev->dev, "Driver v%s, firmware v%d.%d.%d\n",
+ QLCNIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build);
+
if (adapter->ahw->port_type == QLCNIC_XGBE) {
if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
@@ -635,8 +1280,8 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
static int
qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
{
- int err;
struct qlcnic_info nic_info;
+ int err = 0;
memset(&nic_info, 0, sizeof(struct qlcnic_info));
err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
@@ -648,13 +1293,33 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
adapter->ahw->max_tx_ques = nic_info.max_tx_ques;
adapter->ahw->max_rx_ques = nic_info.max_rx_ques;
adapter->ahw->capabilities = nic_info.capabilities;
+
+ if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
+ u32 temp;
+ temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2, &err);
+ if (err == -EIO)
+ return err;
+ adapter->ahw->extra_capability[0] = temp;
+ } else {
+ adapter->ahw->extra_capability[0] = 0;
+ }
+
adapter->ahw->max_mac_filters = nic_info.max_mac_filters;
adapter->ahw->max_mtu = nic_info.max_mtu;
- if (adapter->ahw->capabilities & BIT_6)
+ if (adapter->ahw->capabilities & BIT_6) {
adapter->flags |= QLCNIC_ESWITCH_ENABLED;
- else
+ adapter->ahw->nic_mode = QLCNIC_VNIC_MODE;
+ adapter->max_tx_rings = QLCNIC_MAX_HW_VNIC_TX_RINGS;
+ adapter->max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS;
+
+ dev_info(&adapter->pdev->dev, "vNIC mode enabled.\n");
+ } else {
+ adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE;
+ adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS;
+ adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
+ }
return err;
}
@@ -667,24 +1332,50 @@ void qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
else
adapter->flags |= QLCNIC_TAGGING_ENABLED;
- if (esw_cfg->vlan_id)
- adapter->pvid = esw_cfg->vlan_id;
- else
- adapter->pvid = 0;
+ if (esw_cfg->vlan_id) {
+ adapter->rx_pvid = esw_cfg->vlan_id;
+ adapter->tx_pvid = esw_cfg->vlan_id;
+ } else {
+ adapter->rx_pvid = 0;
+ adapter->tx_pvid = 0;
+ }
}
static int
-qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
+qlcnic_vlan_rx_add(struct net_device *netdev, __be16 proto, u16 vid)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ if (qlcnic_sriov_vf_check(adapter)) {
+ err = qlcnic_sriov_cfg_vf_guest_vlan(adapter, vid, 1);
+ if (err) {
+ netdev_err(netdev,
+ "Cannot add VLAN filter for VLAN id %d, err=%d",
+ vid, err);
+ return err;
+ }
+ }
+
set_bit(vid, adapter->vlans);
return 0;
}
static int
-qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
+qlcnic_vlan_rx_del(struct net_device *netdev, __be16 proto, u16 vid)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ if (qlcnic_sriov_vf_check(adapter)) {
+ err = qlcnic_sriov_cfg_vf_guest_vlan(adapter, vid, 0);
+ if (err) {
+ netdev_err(netdev,
+ "Cannot delete VLAN filter for VLAN id %d, err=%d",
+ vid, err);
+ return err;
+ }
+ }
qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
clear_bit(vid, adapter->vlans);
@@ -705,11 +1396,9 @@ void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
if (!esw_cfg->promisc_mode)
adapter->flags |= QLCNIC_PROMISC_DISABLED;
-
- qlcnic_set_netdev_features(adapter, esw_cfg);
}
-static int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
+int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
{
struct qlcnic_esw_func_cfg esw_cfg;
@@ -721,47 +1410,28 @@ static int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
return -EIO;
qlcnic_set_vlan_config(adapter, &esw_cfg);
qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
+ qlcnic_set_netdev_features(adapter, &esw_cfg);
return 0;
}
-static void
-qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
- struct qlcnic_esw_func_cfg *esw_cfg)
+void qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg)
{
struct net_device *netdev = adapter->netdev;
- netdev_features_t features, vlan_features;
-
- features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
- NETIF_F_IPV6_CSUM | NETIF_F_GRO);
- vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);
-
- if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
- features |= (NETIF_F_TSO | NETIF_F_TSO6);
- vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
- }
- if (netdev->features & NETIF_F_LRO)
- features |= NETIF_F_LRO;
-
- if (esw_cfg->offload_flags & BIT_0) {
- netdev->features |= features;
- if (!(esw_cfg->offload_flags & BIT_1))
- netdev->features &= ~NETIF_F_TSO;
- if (!(esw_cfg->offload_flags & BIT_2))
- netdev->features &= ~NETIF_F_TSO6;
- } else {
- netdev->features &= ~features;
- }
+ if (qlcnic_83xx_check(adapter))
+ return;
- netdev->vlan_features = (features & vlan_features);
+ adapter->offload_flags = esw_cfg->offload_flags;
+ adapter->flags |= QLCNIC_APP_CHANGED_FLAGS;
+ netdev_update_features(netdev);
+ adapter->flags &= ~QLCNIC_APP_CHANGED_FLAGS;
}
static int
qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
{
- void __iomem *priv_op;
u32 op_mode, priv_level;
int err = 0;
@@ -772,8 +1442,7 @@ qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
return 0;
- priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
- op_mode = readl(priv_op);
+ op_mode = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE);
priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
if (op_mode == QLC_DEV_DRV_DEFAULT)
@@ -798,6 +1467,8 @@ qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
"HAL Version: %d, Privileged function\n",
adapter->ahw->fw_hal_version);
}
+ } else {
+ adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE;
}
adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
@@ -805,7 +1476,7 @@ qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
return err;
}
-static int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
+int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
{
struct qlcnic_esw_func_cfg esw_cfg;
struct qlcnic_npar_info *npar;
@@ -814,7 +1485,10 @@ static int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
if (adapter->need_fw_reset)
return 0;
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
+ if (!adapter->npars[i].eswitch_status)
+ continue;
+
memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
esw_cfg.pci_func = adapter->npars[i].pci_func;
esw_cfg.mac_override = BIT_0;
@@ -838,6 +1512,7 @@ static int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
return 0;
}
+
static int
qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
struct qlcnic_npar_info *npar, int pci_func)
@@ -861,7 +1536,7 @@ qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
return 0;
}
-static int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
+int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
{
int i, err;
struct qlcnic_npar_info *npar;
@@ -873,12 +1548,14 @@ static int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
return 0;
/* Set the NPAR config data after FW reset */
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
npar = &adapter->npars[i];
pci_func = npar->pci_func;
+ if (!adapter->npars[i].eswitch_status)
+ continue;
+
memset(&nic_info, 0, sizeof(struct qlcnic_info));
- err = qlcnic_get_nic_info(adapter,
- &nic_info, pci_func);
+ err = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
if (err)
return err;
nic_info.min_tx_bw = npar->min_bw;
@@ -909,14 +1586,16 @@ static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
return 0;
- npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+ npar_state = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_CRB_DEV_NPAR_STATE);
while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
msleep(1000);
- npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+ npar_state = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_CRB_DEV_NPAR_STATE);
}
if (!npar_opt_timeo) {
dev_err(&adapter->pdev->dev,
- "Waiting for NPAR state to opertional timeout\n");
+ "Waiting for NPAR state to operational timeout\n");
return -EIO;
}
return 0;
@@ -944,8 +1623,7 @@ qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
return err;
}
-static int
-qlcnic_start_firmware(struct qlcnic_adapter *adapter)
+static int qlcnic_82xx_start_firmware(struct qlcnic_adapter *adapter)
{
int err;
@@ -985,9 +1663,8 @@ check_fw_status:
if (err)
goto err_out;
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
qlcnic_idc_debug_info(adapter, 1);
-
err = qlcnic_check_eswitch_mode(adapter);
if (err) {
dev_err(&adapter->pdev->dev,
@@ -1005,7 +1682,7 @@ check_fw_status:
return 0;
err_out:
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
dev_err(&adapter->pdev->dev, "Device state set to failed\n");
qlcnic_release_firmware(adapter);
@@ -1017,14 +1694,18 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
{
irq_handler_t handler;
struct qlcnic_host_sds_ring *sds_ring;
- int err, ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ int err, ring, num_sds_rings;
unsigned long flags = 0;
struct net_device *netdev = adapter->netdev;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
- handler = qlcnic_tmp_intr;
+ if (qlcnic_82xx_check(adapter))
+ handler = qlcnic_tmp_intr;
+ else
+ handler = qlcnic_83xx_tmp_intr;
if (!QLCNIC_IS_MSI_FAMILY(adapter))
flags |= IRQF_SHARED;
@@ -1035,20 +1716,64 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
handler = qlcnic_msi_intr;
else {
flags |= IRQF_SHARED;
- handler = qlcnic_intr;
+ if (qlcnic_82xx_check(adapter))
+ handler = qlcnic_intr;
+ else
+ handler = qlcnic_83xx_intr;
}
}
adapter->irq = netdev->irq;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
- sds_ring = &recv_ctx->sds_rings[ring];
- sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
- err = request_irq(sds_ring->irq, handler,
- flags, sds_ring->name, sds_ring);
- if (err)
- return err;
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) {
+ if (qlcnic_82xx_check(adapter) ||
+ (qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED))) {
+ num_sds_rings = adapter->drv_sds_rings;
+ for (ring = 0; ring < num_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ if (qlcnic_82xx_check(adapter) &&
+ !qlcnic_check_multi_tx(adapter) &&
+ (ring == (num_sds_rings - 1))) {
+ if (!(adapter->flags &
+ QLCNIC_MSIX_ENABLED))
+ snprintf(sds_ring->name,
+ sizeof(sds_ring->name),
+ "qlcnic");
+ else
+ snprintf(sds_ring->name,
+ sizeof(sds_ring->name),
+ "%s-tx-0-rx-%d",
+ netdev->name, ring);
+ } else {
+ snprintf(sds_ring->name,
+ sizeof(sds_ring->name),
+ "%s-rx-%d",
+ netdev->name, ring);
+ }
+ err = request_irq(sds_ring->irq, handler, flags,
+ sds_ring->name, sds_ring);
+ if (err)
+ return err;
+ }
+ }
+ if ((qlcnic_82xx_check(adapter) &&
+ qlcnic_check_multi_tx(adapter)) ||
+ (qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED))) {
+ handler = qlcnic_msix_tx_intr;
+ for (ring = 0; ring < adapter->drv_tx_rings;
+ ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ snprintf(tx_ring->name, sizeof(tx_ring->name),
+ "%s-tx-%d", netdev->name, ring);
+ err = request_irq(tx_ring->irq, handler, flags,
+ tx_ring->name, tx_ring);
+ if (err)
+ return err;
+ }
+ }
}
-
return 0;
}
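
The IRQ names requested above follow a simple convention: "%s-rx-%d" per SDS ring, "%s-tx-%d" per dedicated Tx ring, and either "qlcnic" or "%s-tx-0-rx-%d" for the last SDS ring when a single Tx queue shares it. A sketch of the Rx-side naming, with an assumed buffer size:

    #include <stdio.h>

    /* Buffer length is an assumption; the driver sizes it from the ring struct. */
    static void name_rx_irq(char *buf, size_t len, const char *netdev,
                            int ring, int last_ring, int shared_tx, int msix)
    {
            if (shared_tx && ring == last_ring) {
                    if (msix)
                            snprintf(buf, len, "%s-tx-0-rx-%d", netdev, ring);
                    else
                            snprintf(buf, len, "qlcnic");
            } else {
                    snprintf(buf, len, "%s-rx-%d", netdev, ring);
            }
    }

    int main(void)
    {
            char name[64];

            name_rx_irq(name, sizeof(name), "eth0", 3, 3, 1, 1);
            printf("%s\n", name);   /* prints eth0-tx-0-rx-3 */
            return 0;
    }
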
@@ -1057,21 +1782,78 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter)
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
- sds_ring = &recv_ctx->sds_rings[ring];
- free_irq(sds_ring->irq, sds_ring);
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) {
+ if (qlcnic_82xx_check(adapter) ||
+ (qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED))) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ free_irq(sds_ring->irq, sds_ring);
+ }
+ }
+ if ((qlcnic_83xx_check(adapter) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
+ (qlcnic_82xx_check(adapter) &&
+ qlcnic_check_multi_tx(adapter))) {
+ for (ring = 0; ring < adapter->drv_tx_rings;
+ ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ if (tx_ring->irq)
+ free_irq(tx_ring->irq, tx_ring);
+ }
+ }
}
}
-static int
-__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+static void qlcnic_get_lro_mss_capability(struct qlcnic_adapter *adapter)
{
- int ring;
- u32 capab2;
+ u32 capab = 0;
+
+ if (qlcnic_82xx_check(adapter)) {
+ if (adapter->ahw->extra_capability[0] &
+ QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
+ adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
+ } else {
+ capab = adapter->ahw->capabilities;
+ if (QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(capab))
+ adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
+ }
+}
+static int qlcnic_config_def_intr_coalesce(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err;
+
+ /* Initialize interrupt coalesce parameters */
+ ahw->coal.flag = QLCNIC_INTR_DEFAULT;
+
+ if (qlcnic_83xx_check(adapter)) {
+ ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX_TX;
+ ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
+ ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
+ ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+ ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+
+ err = qlcnic_83xx_set_rx_tx_intr_coal(adapter);
+ } else {
+ ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX;
+ ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+ ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+
+ err = qlcnic_82xx_set_rx_coalesce(adapter);
+ }
+
+ return err;
+}
+
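
Default coalescing differs by family: 83xx programs both Tx and Rx parameters, 82xx programs Rx only. The sketch below shows the shape of those defaults; the numeric values are illustrative assumptions, not the QLCNIC_DEF_INTR_COALESCE_* macros:

    #include <stdio.h>

    /* Shape of the per-adapter coalescing defaults; numeric values here are
     * illustrative assumptions, not the driver's macro values. */
    struct coal_cfg {
            int rx_time_us, rx_packets;
            int tx_time_us, tx_packets;
            int coalesce_tx;        /* 83xx coalesces Tx and Rx; 82xx is Rx only */
    };

    static struct coal_cfg default_coal(int is_83xx)
    {
            struct coal_cfg c = { .rx_time_us = 3, .rx_packets = 256 };

            if (is_83xx) {
                    c.tx_time_us = 64;
                    c.tx_packets = 64;
                    c.coalesce_tx = 1;
            }
            return c;
    }

    int main(void)
    {
            struct coal_cfg c = default_coal(1);

            printf("rx: %d us / %d pkts, tx: %d us / %d pkts\n",
                   c.rx_time_us, c.rx_packets, c.tx_time_us, c.tx_packets);
            return 0;
    }
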
+int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ int ring;
struct qlcnic_host_rds_ring *rds_ring;
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
@@ -1079,21 +1861,18 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
return 0;
+
if (qlcnic_set_eswitch_port_config(adapter))
return -EIO;
- if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
- capab2 = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
- if (capab2 & QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
- adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
- }
+ qlcnic_get_lro_mss_capability(adapter);
if (qlcnic_fw_create_ctx(adapter))
return -EIO;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &adapter->recv_ctx->rds_rings[ring];
- qlcnic_post_rx_buffers(adapter, rds_ring);
+ qlcnic_post_rx_buffers(adapter, rds_ring, ring);
}
qlcnic_set_multi(netdev);
@@ -1101,27 +1880,25 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
adapter->ahw->linkup = 0;
- if (adapter->max_sds_rings > 1)
+ if (adapter->drv_sds_rings > 1)
qlcnic_config_rss(adapter, 1);
- qlcnic_config_intr_coalesce(adapter);
+ qlcnic_config_def_intr_coalesce(adapter);
if (netdev->features & NETIF_F_LRO)
qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
+ set_bit(__QLCNIC_DEV_UP, &adapter->state);
qlcnic_napi_enable(adapter);
qlcnic_linkevent_request(adapter, 1);
adapter->ahw->reset_context = 0;
- set_bit(__QLCNIC_DEV_UP, &adapter->state);
+ netif_tx_start_all_queues(netdev);
return 0;
}
-/* Usage: During resume and firmware recovery module.*/
-
-static int
-qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+int qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
int err = 0;
@@ -1133,9 +1910,10 @@ qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
return err;
}
-static void
-__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
+void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
+ int ring;
+
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
@@ -1143,8 +1921,8 @@ __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
return;
smp_mb();
- spin_lock(&adapter->tx_clean_lock);
netif_carrier_off(netdev);
+ adapter->ahw->linkup = 0;
netif_tx_disable(netdev);
qlcnic_free_mac_list(adapter);
@@ -1153,6 +1931,8 @@ __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
qlcnic_delete_lb_filters(adapter);
qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
+ if (qlcnic_sriov_vf_check(adapter))
+ qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
qlcnic_napi_disable(adapter);
@@ -1160,14 +1940,14 @@ __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
adapter->flags &= ~QLCNIC_FW_LRO_MSS_CAP;
qlcnic_reset_rx_buffers_list(adapter);
- qlcnic_release_tx_buffers(adapter);
- spin_unlock(&adapter->tx_clean_lock);
+
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++)
+ qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
}
/* Usage: During suspend and firmware recovery module */
-static void
-qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
+void qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
rtnl_lock();
if (netif_running(netdev))
@@ -1176,7 +1956,7 @@ qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
}
-static int
+int
qlcnic_attach(struct qlcnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -1210,6 +1990,11 @@ qlcnic_attach(struct qlcnic_adapter *adapter)
qlcnic_create_sysfs_entries(adapter);
+#ifdef CONFIG_QLCNIC_VXLAN
+ if (qlcnic_encap_rx_offload(adapter))
+ vxlan_get_rx_port(netdev);
+#endif
+
adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
return 0;
@@ -1222,8 +2007,7 @@ err_out_napi_del:
return err;
}
-static void
-qlcnic_detach(struct qlcnic_adapter *adapter)
+void qlcnic_detach(struct qlcnic_adapter *adapter)
{
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
@@ -1239,17 +2023,18 @@ qlcnic_detach(struct qlcnic_adapter *adapter)
adapter->is_up = 0;
}
-void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
+void qlcnic_diag_free_res(struct net_device *netdev, int drv_sds_rings)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_host_sds_ring *sds_ring;
+ int drv_tx_rings = adapter->drv_tx_rings;
int ring;
clear_bit(__QLCNIC_DEV_UP, &adapter->state);
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
- qlcnic_disable_int(sds_ring);
+ qlcnic_disable_sds_intr(adapter, sds_ring);
}
}
@@ -1258,7 +2043,8 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
qlcnic_detach(adapter);
adapter->ahw->diag_test = 0;
- adapter->max_sds_rings = max_sds_rings;
+ adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
if (qlcnic_attach(adapter))
goto out;
@@ -1271,44 +2057,54 @@ out:
static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
int err = 0;
- adapter->ahw = kzalloc(sizeof(struct qlcnic_hardware_context),
- GFP_KERNEL);
- if (!adapter->ahw) {
- dev_err(&adapter->pdev->dev,
- "Failed to allocate recv ctx resources for adapter\n");
- err = -ENOMEM;
- goto err_out;
- }
+
adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
GFP_KERNEL);
if (!adapter->recv_ctx) {
- dev_err(&adapter->pdev->dev,
- "Failed to allocate recv ctx resources for adapter\n");
- kfree(adapter->ahw);
- adapter->ahw = NULL;
err = -ENOMEM;
goto err_out;
}
- /* Initialize interrupt coalesce parameters */
- adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
- adapter->ahw->coal.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
- adapter->ahw->coal.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
+
+ if (qlcnic_83xx_check(adapter)) {
+ ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX_TX;
+ ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
+ ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
+ ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+ ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+ } else {
+ ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX;
+ ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+ ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+ }
+
+ /* clear stats */
+ memset(&adapter->stats, 0, sizeof(adapter->stats));
err_out:
return err;
}
static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+
kfree(adapter->recv_ctx);
adapter->recv_ctx = NULL;
- if (adapter->ahw->fw_dump.tmpl_hdr) {
- vfree(adapter->ahw->fw_dump.tmpl_hdr);
- adapter->ahw->fw_dump.tmpl_hdr = NULL;
+ if (fw_dump->tmpl_hdr) {
+ vfree(fw_dump->tmpl_hdr);
+ fw_dump->tmpl_hdr = NULL;
+ }
+
+ if (fw_dump->dma_buffer) {
+ dma_free_coherent(&adapter->pdev->dev, QLC_PEX_DMA_READ_SIZE,
+ fw_dump->dma_buffer, fw_dump->phys_addr);
+ fw_dump->dma_buffer = NULL;
}
- kfree(adapter->ahw);
- adapter->ahw = NULL;
+
+ kfree(adapter->ahw->reset.buff);
+ adapter->ahw->fw_dump.tmpl_hdr = NULL;
}
int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
@@ -1326,8 +2122,9 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
qlcnic_detach(adapter);
- adapter->max_sds_rings = 1;
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
adapter->ahw->diag_test = test;
+ adapter->ahw->linkup = 0;
ret = qlcnic_attach(adapter);
if (ret) {
@@ -1344,13 +2141,13 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &adapter->recv_ctx->rds_rings[ring];
- qlcnic_post_rx_buffers(adapter, rds_ring);
+ qlcnic_post_rx_buffers(adapter, rds_ring, ring);
}
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
- qlcnic_enable_int(sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
}
}
@@ -1382,6 +2179,7 @@ qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
netif_device_attach(netdev);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ netdev_info(adapter->netdev, "%s: soft reset complete\n", __func__);
return 0;
}
@@ -1418,49 +2216,121 @@ qlcnic_reset_context(struct qlcnic_adapter *adapter)
return err;
}
-static int
+static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u16 act_pci_fn = ahw->total_nic_func;
+ u16 count;
+
+ ahw->max_mc_count = QLCNIC_MAX_MC_COUNT;
+ if (act_pci_fn <= 2)
+ count = (QLCNIC_MAX_UC_COUNT - QLCNIC_MAX_MC_COUNT) /
+ act_pci_fn;
+ else
+ count = (QLCNIC_LB_MAX_FILTERS - QLCNIC_MAX_MC_COUNT) /
+ act_pci_fn;
+ ahw->max_uc_count = count;
+}
+
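
The unicast filter budget is a fixed pool minus the multicast reservation, split evenly across active NIC functions, with a smaller pool once more than two functions are active. A sketch with assumed pool sizes:

    #include <stdio.h>

    /* Pool sizes are illustrative assumptions, not the driver's
     * QLCNIC_MAX_MC_COUNT / QLCNIC_MAX_UC_COUNT / QLCNIC_LB_MAX_FILTERS. */
    #define MC_COUNT        38
    #define UC_POOL         512
    #define LB_POOL         64

    static int max_uc_per_func(int act_pci_fn)
    {
            if (act_pci_fn <= 2)
                    return (UC_POOL - MC_COUNT) / act_pci_fn;
            return (LB_POOL - MC_COUNT) / act_pci_fn;
    }

    int main(void)
    {
            printf("2 functions -> %d unicast filters each\n", max_uc_per_func(2));
            printf("4 functions -> %d unicast filters each\n", max_uc_per_func(4));
            return 0;
    }
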
+static int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
+ u8 tx_queues, u8 rx_queues)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err = 0;
+
+ if (tx_queues) {
+ err = netif_set_real_num_tx_queues(netdev, tx_queues);
+ if (err) {
+ netdev_err(netdev, "failed to set %d Tx queues\n",
+ tx_queues);
+ return err;
+ }
+ }
+
+ if (rx_queues) {
+ err = netif_set_real_num_rx_queues(netdev, rx_queues);
+ if (err)
+ netdev_err(netdev, "failed to set %d Rx queues\n",
+ rx_queues);
+ }
+
+ return err;
+}
+
+int
qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
int pci_using_dac)
{
int err;
struct pci_dev *pdev = adapter->pdev;
+ adapter->rx_csum = 1;
adapter->ahw->mc_enabled = 0;
- adapter->ahw->max_mc_count = 38;
+ qlcnic_set_mac_filter_count(adapter);
netdev->netdev_ops = &qlcnic_netdev_ops;
- netdev->watchdog_timeo = 5*HZ;
+ netdev->watchdog_timeo = QLCNIC_WATCHDOG_TIMEOUTVALUE * HZ;
qlcnic_change_mtu(netdev, netdev->mtu);
- SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
+ netdev->ethtool_ops = (qlcnic_sriov_vf_check(adapter)) ?
+ &qlcnic_sriov_vf_ethtool_ops : &qlcnic_ethtool_ops;
+
+ netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_GRO |
+ NETIF_F_HW_VLAN_CTAG_RX);
+ netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM);
- netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
+ if (QLCNIC_IS_TSO_CAPABLE(adapter)) {
+ netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
+ netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
+ }
+
+ if (pci_using_dac) {
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
+ }
- if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
- netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
- if (pci_using_dac == 1)
- netdev->hw_features |= NETIF_F_HIGHDMA;
+ if (qlcnic_vlan_tx_check(adapter))
+ netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX);
- netdev->vlan_features = netdev->hw_features;
+ if (qlcnic_sriov_vf_check(adapter))
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
- if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
- netdev->hw_features |= NETIF_F_HW_VLAN_TX;
if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
- netdev->hw_features |= NETIF_F_LRO;
+ netdev->features |= NETIF_F_LRO;
+
+ if (qlcnic_encap_tx_offload(adapter)) {
+ netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
+
+ /* encapsulation Tx offload supported by Adapter */
+ netdev->hw_enc_features = NETIF_F_IP_CSUM |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_TSO |
+ NETIF_F_TSO6;
+ }
- netdev->features |= netdev->hw_features |
- NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+ if (qlcnic_encap_rx_offload(adapter))
+ netdev->hw_enc_features |= NETIF_F_RXCSUM;
+ netdev->hw_features = netdev->features;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->irq = adapter->msix_entries[0].vector;
+ err = qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings,
+ adapter->drv_sds_rings);
+ if (err)
+ return err;
+
err = register_netdev(netdev);
if (err) {
dev_err(&pdev->dev, "failed to register net device\n");
return err;
}
+ qlcnic_dcb_init_dcbnl_ops(adapter->dcb);
+
return 0;
}
@@ -1480,27 +2350,96 @@ static int qlcnic_set_dma_mask(struct pci_dev *pdev, int *pci_using_dac)
return 0;
}
-static int
-qlcnic_alloc_msix_entries(struct qlcnic_adapter *adapter, u16 count)
+void qlcnic_free_tx_rings(struct qlcnic_adapter *adapter)
{
- adapter->msix_entries = kcalloc(count, sizeof(struct msix_entry),
- GFP_KERNEL);
+ int ring;
+ struct qlcnic_host_tx_ring *tx_ring;
- if (adapter->msix_entries)
- return 0;
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ if (tx_ring && tx_ring->cmd_buf_arr != NULL) {
+ vfree(tx_ring->cmd_buf_arr);
+ tx_ring->cmd_buf_arr = NULL;
+ }
+ }
+ if (adapter->tx_ring != NULL)
+ kfree(adapter->tx_ring);
+}
+
+int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
+{
+ int ring, vector, index;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_cmd_buffer *cmd_buf_arr;
- dev_err(&adapter->pdev->dev, "failed allocating msix_entries\n");
- return -ENOMEM;
+ tx_ring = kcalloc(adapter->drv_tx_rings,
+ sizeof(struct qlcnic_host_tx_ring), GFP_KERNEL);
+ if (tx_ring == NULL)
+ return -ENOMEM;
+
+ adapter->tx_ring = tx_ring;
+
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ tx_ring->num_desc = adapter->num_txd;
+ tx_ring->txq = netdev_get_tx_queue(netdev, ring);
+ cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
+ if (cmd_buf_arr == NULL) {
+ qlcnic_free_tx_rings(adapter);
+ return -ENOMEM;
+ }
+ memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
+ tx_ring->cmd_buf_arr = cmd_buf_arr;
+ spin_lock_init(&tx_ring->tx_clean_lock);
+ }
+
+ if (qlcnic_83xx_check(adapter) ||
+ (qlcnic_82xx_check(adapter) && qlcnic_check_multi_tx(adapter))) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ tx_ring->adapter = adapter;
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ index = adapter->drv_sds_rings + ring;
+ vector = adapter->msix_entries[index].vector;
+ tx_ring->irq = vector;
+ }
+ }
+ }
+
+ return 0;
+}
+
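
Because the MSI-X table is laid out SDS rings first and Tx rings after them, Tx ring i binds to entry drv_sds_rings + i, which is what the allocation loop above computes. A trivial sketch of that mapping:

    #include <stdio.h>

    /* SDS rings occupy entries 0..drv_sds_rings-1, so Tx ring i takes
     * entry drv_sds_rings + i. */
    static int tx_ring_msix_index(int drv_sds_rings, int tx_ring)
    {
            return drv_sds_rings + tx_ring;
    }

    int main(void)
    {
            int sds = 4, ring;

            for (ring = 0; ring < 4; ring++)
                    printf("tx ring %d -> msix entry %d\n",
                           ring, tx_ring_msix_index(sds, ring));
            return 0;
    }
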
+void qlcnic_set_drv_version(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 fw_cmd = 0;
+
+ if (qlcnic_82xx_check(adapter))
+ fw_cmd = QLCNIC_CMD_82XX_SET_DRV_VER;
+ else if (qlcnic_83xx_check(adapter))
+ fw_cmd = QLCNIC_CMD_83XX_SET_DRV_VER;
+
+ if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_SET_DRV_VER)
+ qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd);
}
+/* Reset firmware API lock */
+static void qlcnic_reset_api_lock(struct qlcnic_adapter *adapter)
+{
+ qlcnic_api_lock(adapter);
+ qlcnic_api_unlock(adapter);
+}
+
+
static int
qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev = NULL;
struct qlcnic_adapter *adapter = NULL;
+ struct qlcnic_hardware_context *ahw;
int err, pci_using_dac = -1;
- uint8_t revision_id;
- char board_name[QLCNIC_MAX_BOARD_NAME_LEN];
+ char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
err = pci_enable_device(pdev);
if (err)
@@ -1522,81 +2461,174 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
pci_enable_pcie_error_reporting(pdev);
- netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
- if (!netdev) {
+ ahw = kzalloc(sizeof(struct qlcnic_hardware_context), GFP_KERNEL);
+ if (!ahw) {
err = -ENOMEM;
goto err_out_free_res;
}
+ switch (ent->device) {
+ case PCI_DEVICE_ID_QLOGIC_QLE824X:
+ ahw->hw_ops = &qlcnic_hw_ops;
+ ahw->reg_tbl = (u32 *) qlcnic_reg_tbl;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_QLE844X:
+ qlcnic_83xx_register_map(ahw);
+ break;
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
+ qlcnic_sriov_vf_register_map(ahw);
+ break;
+ default:
+ goto err_out_free_hw_res;
+ }
+
+ err = qlcnic_setup_pci_map(pdev, ahw);
+ if (err)
+ goto err_out_free_hw_res;
+
+ netdev = alloc_etherdev_mq(sizeof(struct qlcnic_adapter),
+ QLCNIC_MAX_TX_RINGS);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_out_iounmap;
+ }
+
SET_NETDEV_DEV(netdev, &pdev->dev);
adapter = netdev_priv(netdev);
adapter->netdev = netdev;
adapter->pdev = pdev;
+ adapter->ahw = ahw;
+
+ adapter->qlcnic_wq = create_singlethread_workqueue("qlcnic");
+ if (adapter->qlcnic_wq == NULL) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "Failed to create workqueue\n");
+ goto err_out_free_netdev;
+ }
err = qlcnic_alloc_adapter_resources(adapter);
if (err)
- goto err_out_free_netdev;
+ goto err_out_free_wq;
adapter->dev_rst_time = jiffies;
- revision_id = pdev->revision;
- adapter->ahw->revision_id = revision_id;
- adapter->mac_learn = qlcnic_mac_learn;
+ ahw->revision_id = pdev->revision;
+ ahw->max_vnic_func = qlcnic_get_vnic_func_count(adapter);
+ if (qlcnic_mac_learn == FDB_MAC_LEARN)
+ adapter->fdb_mac_learn = true;
+ else if (qlcnic_mac_learn == DRV_MAC_LEARN)
+ adapter->drv_mac_learn = true;
rwlock_init(&adapter->ahw->crb_lock);
mutex_init(&adapter->ahw->mem_lock);
- spin_lock_init(&adapter->tx_clean_lock);
INIT_LIST_HEAD(&adapter->mac_list);
- err = qlcnic_setup_pci_map(pdev, adapter->ahw);
- if (err)
- goto err_out_free_hw;
- qlcnic_check_vf(adapter);
+ qlcnic_register_dcb(adapter);
+
+ if (qlcnic_82xx_check(adapter)) {
+ qlcnic_check_vf(adapter, ent);
+ adapter->portnum = adapter->ahw->pci_func;
+ qlcnic_reset_api_lock(adapter);
+ err = qlcnic_start_firmware(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"
+ "\t\tIf reboot doesn't help, try flashing the card\n");
+ goto err_out_maintenance_mode;
+ }
- /* This will be reset for mezz cards */
- adapter->portnum = adapter->ahw->pci_func;
+ /* compute and set default and max tx/sds rings */
+ if (adapter->ahw->msix_supported) {
+ if (qlcnic_check_multi_tx_capability(adapter) == 1)
+ qlcnic_set_tx_ring_count(adapter,
+ QLCNIC_SINGLE_RING);
+ else
+ qlcnic_set_tx_ring_count(adapter,
+ QLCNIC_DEF_TX_RINGS);
+ qlcnic_set_sds_ring_count(adapter,
+ QLCNIC_DEF_SDS_RINGS);
+ } else {
+ qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
+ qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);
+ }
- err = qlcnic_get_board_info(adapter);
- if (err) {
- dev_err(&pdev->dev, "Error getting board config info.\n");
- goto err_out_iounmap;
- }
+ err = qlcnic_setup_idc_param(adapter);
+ if (err)
+ goto err_out_free_hw;
- err = qlcnic_setup_idc_param(adapter);
- if (err)
- goto err_out_iounmap;
+ adapter->flags |= QLCNIC_NEED_FLR;
- adapter->flags |= QLCNIC_NEED_FLR;
+ } else if (qlcnic_83xx_check(adapter)) {
+ qlcnic_83xx_check_vf(adapter, ent);
+ adapter->portnum = adapter->ahw->pci_func;
+ err = qlcnic_83xx_init(adapter, pci_using_dac);
+ if (err) {
+ switch (err) {
+ case -ENOTRECOVERABLE:
+ dev_err(&pdev->dev, "Adapter initialization failed due to a faulty hardware\n");
+ dev_err(&pdev->dev, "Please replace the adapter with new one and return the faulty adapter for repair\n");
+ goto err_out_free_hw;
+ case -ENOMEM:
+ dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n");
+ goto err_out_free_hw;
+ case -EOPNOTSUPP:
+ dev_err(&pdev->dev, "Adapter initialization failed\n");
+ goto err_out_free_hw;
+ default:
+ dev_err(&pdev->dev, "Adapter initialization failed. Driver will load in maintenance mode to recover the adapter using the application\n");
+ goto err_out_maintenance_mode;
+ }
+ }
- err = adapter->nic_ops->start_firmware(adapter);
- if (err) {
- dev_err(&pdev->dev, "Loading fw failed. Please Reboot\n"
- "\t\tIf reboot doesn't help, try flashing the card\n");
- goto err_out_maintenance_mode;
+ if (qlcnic_sriov_vf_check(adapter))
+ return 0;
+ } else {
+ dev_err(&pdev->dev,
+ "%s: failed. Please Reboot\n", __func__);
+ goto err_out_free_hw;
}
if (qlcnic_read_mac_addr(adapter))
dev_warn(&pdev->dev, "failed to read mac addr\n");
+ qlcnic_read_phys_port_id(adapter);
+
if (adapter->portnum == 0) {
qlcnic_get_board_name(adapter, board_name);
+
pr_info("%s: %s Board Chip rev 0x%x\n",
module_name(THIS_MODULE),
board_name, adapter->ahw->revision_id);
}
- qlcnic_clear_stats(adapter);
+ if (qlcnic_83xx_check(adapter) && !qlcnic_use_msi_x &&
+ !!qlcnic_use_msi)
+ dev_warn(&pdev->dev,
+ "Device does not support MSI interrupts\n");
- err = qlcnic_alloc_msix_entries(adapter, adapter->ahw->max_rx_ques);
- if (err)
- goto err_out_decr_ref;
+ if (qlcnic_82xx_check(adapter)) {
+ qlcnic_dcb_enable(adapter->dcb);
+ qlcnic_dcb_get_info(adapter->dcb);
+ err = qlcnic_setup_intr(adapter);
+
+ if (err) {
+ dev_err(&pdev->dev, "Failed to setup interrupt\n");
+ goto err_out_disable_msi;
+ }
+ }
- qlcnic_setup_intr(adapter);
+ err = qlcnic_get_act_pci_func(adapter);
+ if (err)
+ goto err_out_disable_mbx_intr;
err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
if (err)
- goto err_out_disable_msi;
+ goto err_out_disable_mbx_intr;
+
+ if (adapter->portnum == 0)
+ qlcnic_set_drv_version(adapter);
pci_set_drvdata(pdev, adapter);
@@ -1615,47 +2647,66 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
- if (adapter->mac_learn)
+ if (adapter->drv_mac_learn)
qlcnic_alloc_lb_filters_mem(adapter);
- qlcnic_create_diag_entries(adapter);
-
+ qlcnic_add_sysfs(adapter);
+ qlcnic_register_hwmon_dev(adapter);
return 0;
+err_out_disable_mbx_intr:
+ if (qlcnic_83xx_check(adapter))
+ qlcnic_83xx_free_mbx_intr(adapter);
+
err_out_disable_msi:
qlcnic_teardown_intr(adapter);
- kfree(adapter->msix_entries);
-
-err_out_decr_ref:
+ qlcnic_cancel_idc_work(adapter);
qlcnic_clr_all_drv_state(adapter, 0);
-err_out_iounmap:
- qlcnic_cleanup_pci_map(adapter);
-
err_out_free_hw:
qlcnic_free_adapter_resources(adapter);
+err_out_free_wq:
+ destroy_workqueue(adapter->qlcnic_wq);
+
err_out_free_netdev:
free_netdev(netdev);
+err_out_iounmap:
+ qlcnic_cleanup_pci_map(ahw);
+
+err_out_free_hw_res:
+ kfree(ahw);
+
err_out_free_res:
pci_release_regions(pdev);
err_out_disable_pdev:
- pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
return err;
err_out_maintenance_mode:
+ set_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state);
netdev->netdev_ops = &qlcnic_netdev_failed_ops;
- SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops);
+ netdev->ethtool_ops = &qlcnic_ethtool_failed_ops;
+ ahw->port_type = QLCNIC_XGBE;
+
+ if (qlcnic_83xx_check(adapter))
+ adapter->tgt_status_reg = NULL;
+ else
+ ahw->board_type = QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS;
+
err = register_netdev(netdev);
+
if (err) {
- dev_err(&pdev->dev, "failed to register net device\n");
- goto err_out_decr_ref;
+ dev_err(&pdev->dev, "Failed to register net device\n");
+ qlcnic_clr_all_drv_state(adapter, 0);
+ goto err_out_free_hw;
}
+
pci_set_drvdata(pdev, adapter);
- qlcnic_create_diag_entries(adapter);
+ qlcnic_add_sysfs(adapter);
+
return 0;
}
@@ -1663,6 +2714,7 @@ static void qlcnic_remove(struct pci_dev *pdev)
{
struct qlcnic_adapter *adapter;
struct net_device *netdev;
+ struct qlcnic_hardware_context *ahw;
adapter = pci_get_drvdata(pdev);
if (adapter == NULL)
@@ -1670,9 +2722,23 @@ static void qlcnic_remove(struct pci_dev *pdev)
netdev = adapter->netdev;
- qlcnic_cancel_fw_work(adapter);
+ qlcnic_cancel_idc_work(adapter);
+ qlcnic_sriov_pf_disable(adapter);
+ ahw = adapter->ahw;
unregister_netdev(netdev);
+ qlcnic_sriov_cleanup(adapter);
+
+ if (qlcnic_83xx_check(adapter)) {
+ qlcnic_83xx_initialize_nic(adapter, 0);
+ cancel_delayed_work_sync(&adapter->idc_aen_work);
+ qlcnic_83xx_free_mbx_intr(adapter);
+ qlcnic_83xx_detach_mailbox_work(adapter);
+ qlcnic_83xx_free_mailbox(ahw->mailbox);
+ kfree(ahw->fw_info);
+ }
+
+ qlcnic_dcb_free(adapter->dcb);
qlcnic_detach(adapter);
@@ -1689,52 +2755,27 @@ static void qlcnic_remove(struct pci_dev *pdev)
qlcnic_free_lb_filters_mem(adapter);
qlcnic_teardown_intr(adapter);
- kfree(adapter->msix_entries);
- qlcnic_remove_diag_entries(adapter);
+ qlcnic_remove_sysfs(adapter);
+
+ qlcnic_unregister_hwmon_dev(adapter);
- qlcnic_cleanup_pci_map(adapter);
+ qlcnic_cleanup_pci_map(adapter->ahw);
qlcnic_release_firmware(adapter);
pci_disable_pcie_error_reporting(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
-
- qlcnic_free_adapter_resources(adapter);
- free_netdev(netdev);
-}
-static int __qlcnic_shutdown(struct pci_dev *pdev)
-{
- struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
- struct net_device *netdev = adapter->netdev;
- int retval;
-
- netif_device_detach(netdev);
-
- qlcnic_cancel_fw_work(adapter);
-
- if (netif_running(netdev))
- qlcnic_down(adapter, netdev);
-
- if (qlcnic_82xx_check(adapter))
- qlcnic_clr_all_drv_state(adapter, 0);
-
- clear_bit(__QLCNIC_RESETTING, &adapter->state);
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-
- if (qlcnic_82xx_check(adapter)) {
- if (qlcnic_wol_supported(adapter)) {
- pci_enable_wake(pdev, PCI_D3cold, 1);
- pci_enable_wake(pdev, PCI_D3hot, 1);
- }
+ if (adapter->qlcnic_wq) {
+ destroy_workqueue(adapter->qlcnic_wq);
+ adapter->qlcnic_wq = NULL;
}
- return 0;
+ qlcnic_free_adapter_resources(adapter);
+ kfree(ahw);
+ free_netdev(netdev);
}
static void qlcnic_shutdown(struct pci_dev *pdev)
@@ -1746,8 +2787,7 @@ static void qlcnic_shutdown(struct pci_dev *pdev)
}
#ifdef CONFIG_PM
-static int
-qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
+static int qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
{
int retval;
@@ -1759,11 +2799,9 @@ qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
}
-static int
-qlcnic_resume(struct pci_dev *pdev)
+static int qlcnic_resume(struct pci_dev *pdev)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
- struct net_device *netdev = adapter->netdev;
int err;
err = pci_enable_device(pdev);
@@ -1774,34 +2812,19 @@ qlcnic_resume(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
- err = adapter->nic_ops->start_firmware(adapter);
- if (err) {
- dev_err(&pdev->dev, "failed to start firmware\n");
- return err;
- }
-
- if (netif_running(netdev)) {
- err = qlcnic_up(adapter, netdev);
- if (err)
- goto done;
-
- qlcnic_restore_indev_addr(netdev, NETDEV_UP);
- }
-done:
- netif_device_attach(netdev);
- qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
- return 0;
+ return __qlcnic_resume(adapter);
}
#endif
static int qlcnic_open(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
int err;
- if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) {
- netdev_err(netdev, "Device in FAILED state\n");
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) {
+ netdev_err(netdev, "%s: Device is in non-operational state\n",
+ __func__);
+
return -EIO;
}
@@ -1813,14 +2836,8 @@ static int qlcnic_open(struct net_device *netdev)
err = __qlcnic_up(adapter, netdev);
if (err)
- goto err_out;
-
- netif_start_queue(netdev);
-
- return 0;
+ qlcnic_detach(adapter);
-err_out:
- qlcnic_detach(adapter);
return err;
}
@@ -1832,29 +2849,66 @@ static int qlcnic_close(struct net_device *netdev)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
__qlcnic_down(adapter, netdev);
+
return 0;
}
+#define QLCNIC_VF_LB_BUCKET_SIZE 1
+
void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
{
void *head;
int i;
+ struct net_device *netdev = adapter->netdev;
+ u32 filter_size = 0;
+ u16 act_pci_func = 0;
if (adapter->fhash.fmax && adapter->fhash.fhead)
return;
+ act_pci_func = adapter->ahw->total_nic_func;
spin_lock_init(&adapter->mac_learn_lock);
+ spin_lock_init(&adapter->rx_mac_learn_lock);
+
+ if (qlcnic_sriov_vf_check(adapter)) {
+ filter_size = QLCNIC_83XX_SRIOV_VF_MAX_MAC - 1;
+ adapter->fhash.fbucket_size = QLCNIC_VF_LB_BUCKET_SIZE;
+ } else if (qlcnic_82xx_check(adapter)) {
+ filter_size = QLCNIC_LB_MAX_FILTERS;
+ adapter->fhash.fbucket_size = QLCNIC_LB_BUCKET_SIZE;
+ } else {
+ filter_size = QLC_83XX_LB_MAX_FILTERS;
+ adapter->fhash.fbucket_size = QLC_83XX_LB_BUCKET_SIZE;
+ }
+
+ head = kcalloc(adapter->fhash.fbucket_size,
+ sizeof(struct hlist_head), GFP_ATOMIC);
- head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head),
- GFP_KERNEL);
if (!head)
return;
- adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
+ adapter->fhash.fmax = (filter_size / act_pci_func);
adapter->fhash.fhead = head;
- for (i = 0; i < adapter->fhash.fmax; i++)
+ netdev_info(netdev, "active nic func = %d, mac filter size=%d\n",
+ act_pci_func, adapter->fhash.fmax);
+
+ for (i = 0; i < adapter->fhash.fbucket_size; i++)
INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
+
+ adapter->rx_fhash.fbucket_size = adapter->fhash.fbucket_size;
+
+ head = kcalloc(adapter->rx_fhash.fbucket_size,
+ sizeof(struct hlist_head), GFP_ATOMIC);
+
+ if (!head)
+ return;
+
+ adapter->rx_fhash.fmax = (filter_size / act_pci_func);
+ adapter->rx_fhash.fhead = head;
+
+ for (i = 0; i < adapter->rx_fhash.fbucket_size; i++)
+ INIT_HLIST_HEAD(&adapter->rx_fhash.fhead[i]);
}
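
The learned-MAC hash built above is sized from a per-family filter pool divided across active NIC functions, with a per-family bucket count (a single bucket for SR-IOV VFs). A sketch with assumed pool and bucket sizes:

    #include <stdio.h>

    /* Pool and bucket sizes below are illustrative assumptions, not the
     * driver's QLCNIC_LB_* / QLC_83XX_LB_* macros. */
    struct fhash_size { int fmax; int buckets; };

    static struct fhash_size lb_fhash_size(int is_vf, int is_82xx,
                                           int act_pci_func)
    {
            struct fhash_size s;
            int pool;

            if (is_vf) {
                    pool = 12;              /* small per-VF pool */
                    s.buckets = 1;          /* single hash bucket */
            } else if (is_82xx) {
                    pool = 64;
                    s.buckets = 32;
            } else {
                    pool = 2048;
                    s.buckets = 256;
            }
            s.fmax = pool / act_pci_func;
            return s;
    }

    int main(void)
    {
            struct fhash_size s = lb_fhash_size(0, 0, 4);

            printf("83xx, 4 functions: fmax=%d, buckets=%d\n",
                   s.fmax, s.buckets);
            return 0;
    }
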
static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
@@ -1864,16 +2918,25 @@ static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
adapter->fhash.fhead = NULL;
adapter->fhash.fmax = 0;
+
+ if (adapter->rx_fhash.fmax && adapter->rx_fhash.fhead)
+ kfree(adapter->rx_fhash.fhead);
+
+ adapter->rx_fhash.fmax = 0;
+ adapter->rx_fhash.fhead = NULL;
}
-static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
+int qlcnic_check_temp(struct qlcnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
u32 temp_state, temp_val, temp = 0;
int rv = 0;
+ if (qlcnic_83xx_check(adapter))
+ temp = QLCRDX(adapter->ahw, QLC_83XX_ASIC_TEMP);
+
if (qlcnic_82xx_check(adapter))
- temp = QLCRD32(adapter, CRB_TEMP_STATE);
+ temp = QLC_SHARED_REG_RD32(adapter, QLCNIC_ASIC_TEMP);
temp_state = qlcnic_get_temp_state(temp);
temp_val = qlcnic_get_temp_val(temp);
@@ -1903,6 +2966,58 @@ static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
return rv;
}
+static inline void dump_tx_ring_desc(struct qlcnic_host_tx_ring *tx_ring)
+{
+ int i;
+ struct cmd_desc_type0 *tx_desc_info;
+
+ for (i = 0; i < tx_ring->num_desc; i++) {
+ tx_desc_info = &tx_ring->desc_head[i];
+ pr_info("TX Desc: %d\n", i);
+ print_hex_dump(KERN_INFO, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
+ &tx_ring->desc_head[i],
+ sizeof(struct cmd_desc_type0), true);
+ }
+}
+
+static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
+
+ if (!netdev || !netif_running(netdev))
+ return;
+
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netdev_info(netdev, "Tx ring=%d Context Id=0x%x\n",
+ ring, tx_ring->ctx_id);
+ netdev_info(netdev,
+ "xmit_finished=%llu, xmit_called=%llu, xmit_on=%llu, xmit_off=%llu\n",
+ tx_ring->tx_stats.xmit_finished,
+ tx_ring->tx_stats.xmit_called,
+ tx_ring->tx_stats.xmit_on,
+ tx_ring->tx_stats.xmit_off);
+
+ if (tx_ring->crb_intr_mask)
+ netdev_info(netdev, "crb_intr_mask=%d\n",
+ readl(tx_ring->crb_intr_mask));
+
+ netdev_info(netdev,
+ "hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
+ readl(tx_ring->crb_cmd_producer),
+ tx_ring->producer, tx_ring->sw_consumer,
+ le32_to_cpu(*(tx_ring->hw_consumer)));
+
+ netdev_info(netdev, "Total desc=%d, Available desc=%d\n",
+ tx_ring->num_desc, qlcnic_tx_avail(tx_ring));
+
+ if (netif_msg_tx_done(adapter->ahw))
+ dump_tx_ring_desc(tx_ring);
+ }
+}
+
static void qlcnic_tx_timeout(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
@@ -1910,12 +3025,18 @@ static void qlcnic_tx_timeout(struct net_device *netdev)
if (test_bit(__QLCNIC_RESETTING, &adapter->state))
return;
- dev_err(&netdev->dev, "transmit timeout, resetting.\n");
-
- if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
- adapter->need_fw_reset = 1;
- else
+ if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS) {
+ netdev_info(netdev, "Tx timeout, reset the adapter.\n");
+ if (qlcnic_82xx_check(adapter))
+ adapter->need_fw_reset = 1;
+ else if (qlcnic_83xx_check(adapter))
+ qlcnic_83xx_idc_request_reset(adapter,
+ QLCNIC_FORCE_FW_DUMP_KEY);
+ } else {
+ netdev_info(netdev, "Tx timeout, reset adapter context.\n");
+ qlcnic_dump_tx_rings(adapter);
adapter->ahw->reset_context = 1;
+ }
}
static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
@@ -1923,6 +3044,9 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct net_device_stats *stats = &netdev->stats;
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_update_stats(adapter);
+
stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
stats->tx_packets = adapter->stats.xmitfinished;
stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
@@ -1933,7 +3057,7 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
return stats;
}
-static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
+static irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
{
u32 status;
@@ -1972,7 +3096,7 @@ static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
done:
adapter->ahw->diag_cnt++;
- qlcnic_enable_int(sds_ring);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
return IRQ_HANDLED;
}
@@ -2009,20 +3133,50 @@ static irqreturn_t qlcnic_msix_intr(int irq, void *data)
return IRQ_HANDLED;
}
+static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data)
+{
+ struct qlcnic_host_tx_ring *tx_ring = data;
+
+ napi_schedule(&tx_ring->napi);
+ return IRQ_HANDLED;
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev)
{
- int ring;
- struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ return;
+
+ recv_ctx = adapter->recv_ctx;
- disable_irq(adapter->irq);
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
- qlcnic_intr(adapter->irq, sds_ring);
+ qlcnic_disable_sds_intr(adapter, sds_ring);
+ napi_schedule(&sds_ring->napi);
+ }
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ /* Only Multi-Tx queue capable devices need to
+ * schedule NAPI for TX rings
+ */
+ if ((qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
+ (qlcnic_82xx_check(adapter) &&
+ !qlcnic_check_multi_tx(adapter)))
+ return;
+
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ qlcnic_disable_tx_intr(adapter, tx_ring);
+ napi_schedule(&tx_ring->napi);
+ }
}
- enable_irq(adapter->irq);
}
#endif
@@ -2035,7 +3189,7 @@ qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
val |= encoding << 7;
val |= (jiffies - adapter->dev_rst_time) << 8;
- QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
adapter->dev_rst_time = jiffies;
}
@@ -2050,14 +3204,14 @@ qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
if (qlcnic_api_lock(adapter))
return -EIO;
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
if (state == QLCNIC_DEV_NEED_RESET)
QLC_DEV_SET_RST_RDY(val, adapter->portnum);
else if (state == QLCNIC_DEV_NEED_QUISCENT)
QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
qlcnic_api_unlock(adapter);
@@ -2072,37 +3226,38 @@ qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
if (qlcnic_api_lock(adapter))
return -EBUSY;
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
qlcnic_api_unlock(adapter);
return 0;
}
-static void
-qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
+void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
{
u32 val;
if (qlcnic_api_lock(adapter))
goto err;
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
if (failed) {
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_FAILED);
dev_info(&adapter->pdev->dev,
"Device state set to Failed. Please Reboot\n");
} else if (!(val & 0x11111111))
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_COLD);
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
qlcnic_api_unlock(adapter);
err:
@@ -2117,12 +3272,13 @@ static int
qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
{
int act, state, active_mask;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
- state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
- act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
+ act = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
if (adapter->flags & QLCNIC_FW_RESET_OWNER) {
- active_mask = (~(1 << (adapter->ahw->pci_func * 4)));
+ active_mask = (~(1 << (ahw->pci_func * 4)));
act = act & active_mask;
}
@@ -2135,7 +3291,7 @@ qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
{
- u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
+ u32 val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
if (val != QLCNIC_DRV_IDC_VER) {
dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
@@ -2159,19 +3315,21 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
if (qlcnic_api_lock(adapter))
return -1;
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
if (!(val & (1 << (portnum * 4)))) {
QLC_DEV_SET_REF_CNT(val, portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
}
- prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ prev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
QLCDB(adapter, HW, "Device state = %u\n", prev_state);
switch (prev_state) {
case QLCNIC_DEV_COLD:
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
- QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_INITIALIZING);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_IDC_VER,
+ QLCNIC_DRV_IDC_VER);
qlcnic_idc_debug_info(adapter, 0);
qlcnic_api_unlock(adapter);
return 1;
@@ -2182,15 +3340,15 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
return ret;
case QLCNIC_DEV_NEED_RESET:
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
QLC_DEV_SET_RST_RDY(val, portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
break;
case QLCNIC_DEV_NEED_QUISCENT:
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
QLC_DEV_SET_QSCNT_RDY(val, portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
break;
case QLCNIC_DEV_FAILED:
@@ -2207,7 +3365,7 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
do {
msleep(1000);
- prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ prev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
if (prev_state == QLCNIC_DEV_QUISCENT)
continue;
@@ -2222,9 +3380,9 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
if (qlcnic_api_lock(adapter))
return -1;
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
QLC_DEV_CLR_RST_QSCNT(val, portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
ret = qlcnic_check_idc_ver(adapter);
qlcnic_api_unlock(adapter);
@@ -2243,7 +3401,7 @@ qlcnic_fwinit_work(struct work_struct *work)
if (qlcnic_api_lock(adapter))
goto err_ret;
- dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
if (dev_state == QLCNIC_DEV_QUISCENT ||
dev_state == QLCNIC_DEV_NEED_QUISCENT) {
qlcnic_api_unlock(adapter);
@@ -2272,23 +3430,25 @@ qlcnic_fwinit_work(struct work_struct *work)
if (!qlcnic_check_drv_state(adapter)) {
skip_ack_check:
- dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
if (dev_state == QLCNIC_DEV_NEED_RESET) {
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
- QLCNIC_DEV_INITIALIZING);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_INITIALIZING);
set_bit(__QLCNIC_START_FW, &adapter->state);
QLCDB(adapter, DRV, "Restarting fw\n");
qlcnic_idc_debug_info(adapter, 0);
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_CRB_DRV_STATE);
QLC_DEV_SET_RST_RDY(val, adapter->portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ QLC_SHARED_REG_WR32(adapter,
+ QLCNIC_CRB_DRV_STATE, val);
}
qlcnic_api_unlock(adapter);
rtnl_lock();
- if (adapter->ahw->fw_dump.enable &&
+ if (qlcnic_check_fw_dump_state(adapter) &&
(adapter->flags & QLCNIC_FW_RESET_OWNER)) {
QLCDB(adapter, DRV, "Take FW dump\n");
qlcnic_dump_fw(adapter);
@@ -2308,12 +3468,12 @@ skip_ack_check:
qlcnic_api_unlock(adapter);
wait_npar:
- dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
switch (dev_state) {
case QLCNIC_DEV_READY:
- if (!adapter->nic_ops->start_firmware(adapter)) {
+ if (!qlcnic_start_firmware(adapter)) {
qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
adapter->fw_wait_cnt = 0;
return;
@@ -2350,7 +3510,7 @@ qlcnic_detach_work(struct work_struct *work)
} else
qlcnic_down(adapter, netdev);
- status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
+ status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1);
if (status & QLCNIC_RCODE_FATAL_ERROR) {
dev_err(&adapter->pdev->dev,
@@ -2401,19 +3561,19 @@ qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
{
u32 state;
- state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
if (state == QLCNIC_DEV_NPAR_NON_OPER)
return;
if (qlcnic_api_lock(adapter))
return;
- QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
+ QLCNIC_DEV_NPAR_NON_OPER);
qlcnic_api_unlock(adapter);
}
-/*Transit to RESET state from READY state only */
-void
-qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
+static void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter,
+ u32 key)
{
u32 state, xg_val = 0, gb_val = 0;
@@ -2428,25 +3588,30 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
dev_info(&adapter->pdev->dev, "Pause control frames disabled"
" on all ports\n");
adapter->need_fw_reset = 1;
+
if (qlcnic_api_lock(adapter))
return;
- state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) {
- netdev_err(adapter->netdev,
- "Device is in FAILED state, Please Reboot\n");
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) {
+ netdev_err(adapter->netdev, "%s: Device is in non-operational state\n",
+ __func__);
qlcnic_api_unlock(adapter);
+
return;
}
if (state == QLCNIC_DEV_READY) {
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_NEED_RESET);
adapter->flags |= QLCNIC_FW_RESET_OWNER;
QLCDB(adapter, DRV, "NEED_RESET state set\n");
qlcnic_idc_debug_info(adapter, 0);
}
- QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
+ QLCNIC_DEV_NPAR_NON_OPER);
qlcnic_api_unlock(adapter);
}
@@ -2457,34 +3622,22 @@ qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
if (qlcnic_api_lock(adapter))
return;
- QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
+ QLCNIC_DEV_NPAR_OPER);
QLCDB(adapter, DRV, "NPAR operational state set\n");
qlcnic_api_unlock(adapter);
}
-static void
-qlcnic_schedule_work(struct qlcnic_adapter *adapter,
- work_func_t func, int delay)
+void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
+ work_func_t func, int delay)
{
if (test_bit(__QLCNIC_AER, &adapter->state))
return;
INIT_DELAYED_WORK(&adapter->fw_work, func);
- queue_delayed_work(qlcnic_wq, &adapter->fw_work,
- round_jiffies_relative(delay));
-}
-
-static void
-qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
-{
- while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
- msleep(10);
-
- if (!adapter->fw_work.work.func)
- return;
-
- cancel_delayed_work_sync(&adapter->fw_work);
+ queue_delayed_work(adapter->qlcnic_wq, &adapter->fw_work,
+ round_jiffies_relative(delay));
}
static void
@@ -2496,7 +3649,8 @@ qlcnic_attach_work(struct work_struct *work)
u32 npar_state;
if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
- npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+ npar_state = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_CRB_DEV_NPAR_STATE);
if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
qlcnic_clr_all_drv_state(adapter, 0);
else if (npar_state != QLCNIC_DEV_NPAR_OPER)
@@ -2508,6 +3662,8 @@ qlcnic_attach_work(struct work_struct *work)
return;
}
attach:
+ qlcnic_dcb_get_info(adapter->dcb);
+
if (netif_running(netdev)) {
if (qlcnic_up(adapter, netdev))
goto done;
@@ -2520,6 +3676,8 @@ done:
adapter->fw_fail_cnt = 0;
adapter->flags &= ~QLCNIC_FW_HANG;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ if (adapter->portnum == 0)
+ qlcnic_set_drv_version(adapter);
if (!qlcnic_clr_drv_state(adapter))
qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
@@ -2529,33 +3687,34 @@ done:
static int
qlcnic_check_health(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
u32 state = 0, heartbeat;
u32 peg_status;
+ int err = 0;
if (qlcnic_check_temp(adapter))
goto detach;
if (adapter->need_fw_reset)
- qlcnic_dev_request_reset(adapter);
+ qlcnic_dev_request_reset(adapter, 0);
- state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
if (state == QLCNIC_DEV_NEED_RESET) {
qlcnic_set_npar_non_operational(adapter);
adapter->need_fw_reset = 1;
} else if (state == QLCNIC_DEV_NEED_QUISCENT)
goto detach;
- heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+ heartbeat = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
if (heartbeat != adapter->heartbeat) {
adapter->heartbeat = heartbeat;
adapter->fw_fail_cnt = 0;
if (adapter->need_fw_reset)
goto detach;
- if (adapter->ahw->reset_context && qlcnic_auto_fw_reset) {
+ if (ahw->reset_context && qlcnic_auto_fw_reset)
qlcnic_reset_hw_context(adapter);
- adapter->netdev->trans_start = jiffies;
- }
return 0;
}
@@ -2565,25 +3724,25 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
adapter->flags |= QLCNIC_FW_HANG;
- qlcnic_dev_request_reset(adapter);
+ qlcnic_dev_request_reset(adapter, 0);
if (qlcnic_auto_fw_reset)
clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
dev_err(&adapter->pdev->dev, "firmware hang detected\n");
+ peg_status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1);
dev_err(&adapter->pdev->dev, "Dumping hw/fw registers\n"
"PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
"PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
"PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
"PEG_NET_4_PC: 0x%x\n",
- QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1),
- QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS2),
- QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c),
- QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c),
- QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c),
- QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c),
- QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c));
- peg_status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
+ peg_status,
+ QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS2),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c, &err),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c, &err),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c, &err),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c, &err),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, &err));
if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
dev_err(&adapter->pdev->dev,
"Firmware aborted with error code 0x00006700. "
@@ -2597,13 +3756,15 @@ detach:
qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
QLCDB(adapter, DRV, "fw recovery scheduled.\n");
+ } else if (!qlcnic_auto_fw_reset && fw_dump->enable &&
+ adapter->flags & QLCNIC_FW_RESET_OWNER) {
+ qlcnic_dump_fw(adapter);
}
return 1;
}
-static void
-qlcnic_fw_poll_work(struct work_struct *work)
+void qlcnic_fw_poll_work(struct work_struct *work)
{
struct qlcnic_adapter *adapter = container_of(work,
struct qlcnic_adapter, fw_work.work);
@@ -2655,7 +3816,6 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
if (err)
return err;
- pci_set_power_state(pdev, PCI_D0);
pci_set_master(pdev);
pci_restore_state(pdev);
@@ -2667,17 +3827,26 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
if (adapter->ahw->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
adapter->need_fw_reset = 1;
set_bit(__QLCNIC_START_FW, &adapter->state);
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_INITIALIZING);
QLCDB(adapter, DRV, "Restarting fw\n");
}
qlcnic_api_unlock(adapter);
- err = adapter->nic_ops->start_firmware(adapter);
+ err = qlcnic_start_firmware(adapter);
if (err)
return err;
qlcnic_clr_drv_state(adapter);
- qlcnic_setup_intr(adapter);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ err = qlcnic_setup_intr(adapter);
+
+ if (err) {
+ kfree(adapter->msix_entries);
+ netdev_err(netdev, "failed to setup interrupt\n");
+ return err;
+ }
if (netif_running(netdev)) {
err = qlcnic_attach(adapter);
@@ -2699,8 +3868,8 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
return err;
}
-static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
+static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
@@ -2730,24 +3899,64 @@ static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_NEED_RESET;
}
-static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
+static pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *pdev)
{
return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
PCI_ERS_RESULT_RECOVERED;
}
-static void qlcnic_io_resume(struct pci_dev *pdev)
+static void qlcnic_82xx_io_resume(struct pci_dev *pdev)
{
+ u32 state;
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
pci_cleanup_aer_uncorrect_error_status(pdev);
-
- if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
- test_and_clear_bit(__QLCNIC_AER, &adapter->state))
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (state == QLCNIC_DEV_READY && test_and_clear_bit(__QLCNIC_AER,
+ &adapter->state))
qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
- FW_POLL_DELAY);
+ FW_POLL_DELAY);
+}
+
+static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops;
+
+ if (hw_ops->io_error_detected) {
+ return hw_ops->io_error_detected(pdev, state);
+ } else {
+ dev_err(&pdev->dev, "AER error_detected handler not registered.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+}
+
+static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops;
+
+ if (hw_ops->io_slot_reset) {
+ return hw_ops->io_slot_reset(pdev);
+ } else {
+ dev_err(&pdev->dev, "AER slot_reset handler not registered.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+}
+
+static void qlcnic_io_resume(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops;
+
+ if (hw_ops->io_resume)
+ hw_ops->io_resume(pdev);
+ else
+ dev_err(&pdev->dev, "AER resume handler not registered.\n");
}
+
static int
qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
{
@@ -2776,39 +3985,104 @@ qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
return err;
}
-int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val)
+int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
+ int queue_type)
{
- if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
- netdev_info(netdev, "no msix or msi support, hence no rss\n");
+ struct net_device *netdev = adapter->netdev;
+ u8 max_hw_rings = 0;
+ char buf[8];
+ int cur_rings;
+
+ if (queue_type == QLCNIC_RX_QUEUE) {
+ max_hw_rings = adapter->max_sds_rings;
+ cur_rings = adapter->drv_sds_rings;
+ strcpy(buf, "SDS");
+ } else if (queue_type == QLCNIC_TX_QUEUE) {
+ max_hw_rings = adapter->max_tx_rings;
+ cur_rings = adapter->drv_tx_rings;
+ strcpy(buf, "Tx");
+ }
+
+ if (!is_power_of_2(ring_cnt)) {
+ netdev_err(netdev, "%s rings value should be a power of 2\n",
+ buf);
return -EINVAL;
}
- if ((val > max_hw) || (val < 2) || !is_power_of_2(val)) {
- netdev_info(netdev, "rss_ring valid range [2 - %x] in "
- " powers of 2\n", max_hw);
+ if (qlcnic_82xx_check(adapter) && (queue_type == QLCNIC_TX_QUEUE) &&
+ !qlcnic_check_multi_tx(adapter)) {
+ netdev_err(netdev, "No Multi Tx queue support\n");
+ return -EINVAL;
+ }
+
+ if (ring_cnt > num_online_cpus()) {
+ netdev_err(netdev,
+ "%s value[%u] should not be higher than, number of online CPUs\n",
+ buf, num_online_cpus());
return -EINVAL;
}
- return 0;
+ return 0;
}
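
qlcnic_validate_rings() accepts a ring count only if it is a power of two and does not exceed the number of online CPUs; for Tx queues on 82xx adapters it additionally requires multi-Tx-queue support. A minimal standalone sketch of the two arithmetic checks (the adapter-specific multi-Tx check is omitted, and the helper name is illustrative):

/* Illustrative sketch of the ring-count validation rules shown above. */
#include <stdbool.h>

static bool ring_count_is_valid(unsigned int ring_cnt, unsigned int online_cpus)
{
	/* must be a non-zero power of two */
	if (ring_cnt == 0 || (ring_cnt & (ring_cnt - 1)))
		return false;

	/* no more rings than online CPUs */
	if (ring_cnt > online_cpus)
		return false;

	return true;
}
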
-int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data)
+int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- int err = 0;
+ u8 tx_rings, rx_rings;
+ int err;
- if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
return -EBUSY;
+ tx_rings = adapter->drv_tss_rings;
+ rx_rings = adapter->drv_rss_rings;
+
netif_device_detach(netdev);
+
+ err = qlcnic_set_real_num_queues(adapter, tx_rings, rx_rings);
+ if (err)
+ goto done;
+
if (netif_running(netdev))
__qlcnic_down(adapter, netdev);
+
qlcnic_detach(adapter);
+
+ if (qlcnic_83xx_check(adapter)) {
+ qlcnic_83xx_free_mbx_intr(adapter);
+ qlcnic_83xx_enable_mbx_poll(adapter);
+ }
+
qlcnic_teardown_intr(adapter);
- if (qlcnic_enable_msix(adapter, data)) {
- netdev_info(netdev, "failed setting max_rss; rss disabled\n");
- qlcnic_enable_msi_legacy(adapter);
+ err = qlcnic_setup_intr(adapter);
+ if (err) {
+ kfree(adapter->msix_entries);
+ netdev_err(netdev, "failed to setup interrupt\n");
+ return err;
+ }
+
+ /* Check if we need to update real_num_{tx|rx}_queues because
+ * qlcnic_setup_intr() may change the Tx/Rx ring counts
+ */
+ if ((tx_rings != adapter->drv_tx_rings) ||
+ (rx_rings != adapter->drv_sds_rings)) {
+ err = qlcnic_set_real_num_queues(adapter,
+ adapter->drv_tx_rings,
+ adapter->drv_sds_rings);
+ if (err)
+ goto done;
+ }
+
+ if (qlcnic_83xx_check(adapter)) {
+ qlcnic_83xx_initialize_nic(adapter, 1);
+ err = qlcnic_83xx_setup_mbx_intr(adapter);
+ qlcnic_83xx_disable_mbx_poll(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "failed to setup mbx interrupt\n");
+ goto done;
+ }
}
if (netif_running(netdev)) {
@@ -2820,7 +4094,7 @@ int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data)
goto done;
qlcnic_restore_indev_addr(netdev, NETDEV_UP);
}
- done:
+done:
netif_device_attach(netdev);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return err;
@@ -2858,8 +4132,7 @@ qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
in_dev_put(indev);
}
-static void
-qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
+void qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct net_device *dev;
@@ -2867,19 +4140,21 @@ qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
qlcnic_config_indev_addr(adapter, netdev, event);
+ rcu_read_lock();
for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
- dev = __vlan_find_dev_deep(netdev, vid);
+ dev = __vlan_find_dev_deep_rcu(netdev, htons(ETH_P_8021Q), vid);
if (!dev)
continue;
qlcnic_config_indev_addr(adapter, dev, event);
}
+ rcu_read_unlock();
}
static int qlcnic_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct qlcnic_adapter *adapter;
- struct net_device *dev = (struct net_device *)ptr;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
recheck:
if (dev == NULL)
@@ -2940,9 +4215,11 @@ recheck:
switch (event) {
case NETDEV_UP:
qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
+
break;
case NETDEV_DOWN:
qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
+
break;
default:
break;
@@ -2960,11 +4237,10 @@ static struct notifier_block qlcnic_inetaddr_cb = {
.notifier_call = qlcnic_inetaddr_event,
};
#else
-static void
-qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
+void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
{ }
#endif
-static struct pci_error_handlers qlcnic_err_handler = {
+static const struct pci_error_handlers qlcnic_err_handler = {
.error_detected = qlcnic_io_error_detected,
.slot_reset = qlcnic_io_slot_reset,
.resume = qlcnic_io_resume,
@@ -2980,7 +4256,10 @@ static struct pci_driver qlcnic_driver = {
.resume = qlcnic_resume,
#endif
.shutdown = qlcnic_shutdown,
- .err_handler = &qlcnic_err_handler
+ .err_handler = &qlcnic_err_handler,
+#ifdef CONFIG_QLCNIC_SRIOV
+ .sriov_configure = qlcnic_pci_sriov_configure,
+#endif
};
@@ -2990,12 +4269,6 @@ static int __init qlcnic_init_module(void)
printk(KERN_INFO "%s\n", qlcnic_driver_string);
- qlcnic_wq = create_singlethread_workqueue("qlcnic");
- if (qlcnic_wq == NULL) {
- printk(KERN_ERR "qlcnic: cannot create workqueue\n");
- return -ENOMEM;
- }
-
#ifdef CONFIG_INET
register_netdevice_notifier(&qlcnic_netdev_cb);
register_inetaddr_notifier(&qlcnic_inetaddr_cb);
@@ -3007,7 +4280,6 @@ static int __init qlcnic_init_module(void)
unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
unregister_netdevice_notifier(&qlcnic_netdev_cb);
#endif
- destroy_workqueue(qlcnic_wq);
}
return ret;
@@ -3017,14 +4289,12 @@ module_init(qlcnic_init_module);
static void __exit qlcnic_exit_module(void)
{
-
pci_unregister_driver(&qlcnic_driver);
#ifdef CONFIG_INET
unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
unregister_netdevice_notifier(&qlcnic_netdev_cb);
#endif
- destroy_workqueue(qlcnic_wq);
}
module_exit(qlcnic_exit_module);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 0b8d8625834..e46fc39d425 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -1,8 +1,26 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
#include "qlcnic.h"
#include "qlcnic_hdr.h"
+#include "qlcnic_83xx_hw.h"
+#include "qlcnic_hw.h"
#include <net/ip.h>
+#define QLC_83XX_MINIDUMP_FLASH 0x520000
+#define QLC_83XX_OCM_INDEX 3
+#define QLC_83XX_PCI_INDEX 0
+#define QLC_83XX_DMA_ENGINE_INDEX 8
+
+static const u32 qlcnic_ms_read_data[] = {
+ 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC
+};
+
#define QLCNIC_DUMP_WCRB BIT_0
#define QLCNIC_DUMP_RWCRB BIT_1
#define QLCNIC_DUMP_ANDCRB BIT_2
@@ -15,6 +33,16 @@
#define QLCNIC_DUMP_MASK_MAX 0xff
+struct qlcnic_pex_dma_descriptor {
+ u32 read_data_size;
+ u32 dma_desc_cmd;
+ u32 src_addr_low;
+ u32 src_addr_high;
+ u32 dma_bus_addr_low;
+ u32 dma_bus_addr_high;
+ u32 rsvd[6];
+} __packed;
+
struct qlcnic_common_entry_hdr {
u32 type;
u32 offset;
@@ -73,7 +101,10 @@ struct __ocm {
} __packed;
struct __mem {
- u8 rsvd[24];
+ u32 desc_card_addr;
+ u32 dma_desc_cmd;
+ u32 start_dma_cmd;
+ u32 rsvd[3];
u32 addr;
u32 size;
} __packed;
@@ -102,16 +133,55 @@ struct __queue {
u8 rsvd3[2];
} __packed;
+struct __pollrd {
+ u32 sel_addr;
+ u32 read_addr;
+ u32 sel_val;
+ u16 sel_val_stride;
+ u16 no_ops;
+ u32 poll_wait;
+ u32 poll_mask;
+ u32 data_size;
+ u8 rsvd[4];
+} __packed;
+
+struct __mux2 {
+ u32 sel_addr1;
+ u32 sel_addr2;
+ u32 sel_val1;
+ u32 sel_val2;
+ u32 no_ops;
+ u32 sel_val_mask;
+ u32 read_addr;
+ u8 sel_val_stride;
+ u8 data_size;
+ u8 rsvd[2];
+} __packed;
+
+struct __pollrdmwr {
+ u32 addr1;
+ u32 addr2;
+ u32 val1;
+ u32 val2;
+ u32 poll_wait;
+ u32 poll_mask;
+ u32 mod_mask;
+ u32 data_size;
+} __packed;
+
struct qlcnic_dump_entry {
struct qlcnic_common_entry_hdr hdr;
union {
- struct __crb crb;
- struct __cache cache;
- struct __ocm ocm;
- struct __mem mem;
- struct __mux mux;
- struct __queue que;
- struct __ctrl ctrl;
+ struct __crb crb;
+ struct __cache cache;
+ struct __ocm ocm;
+ struct __mem mem;
+ struct __mux mux;
+ struct __queue que;
+ struct __ctrl ctrl;
+ struct __pollrdmwr pollrdmwr;
+ struct __mux2 mux2;
+ struct __pollrd pollrd;
} region;
} __packed;
@@ -131,6 +201,9 @@ enum qlcnic_minidump_opcode {
QLCNIC_DUMP_L2_ITAG = 22,
QLCNIC_DUMP_L2_DATA = 23,
QLCNIC_DUMP_L2_INST = 24,
+ QLCNIC_DUMP_POLL_RD = 35,
+ QLCNIC_READ_MUX2 = 36,
+ QLCNIC_READ_POLLRDMWR = 37,
QLCNIC_DUMP_READ_ROM = 71,
QLCNIC_DUMP_READ_MEM = 72,
QLCNIC_DUMP_READ_CTRL = 98,
@@ -138,52 +211,131 @@ enum qlcnic_minidump_opcode {
QLCNIC_DUMP_RDEND = 255
};
-struct qlcnic_dump_operations {
- enum qlcnic_minidump_opcode opcode;
- u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
- __le32 *);
-};
+inline u32 qlcnic_82xx_get_saved_state(void *t_hdr, u32 index)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+ return hdr->saved_state[index];
+}
+
+inline void qlcnic_82xx_set_saved_state(void *t_hdr, u32 index,
+ u32 value)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+ hdr->saved_state[index] = value;
+}
+
+void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr;
-static void qlcnic_read_dump_reg(u32 addr, void __iomem *bar0, u32 *data)
+ hdr = fw_dump->tmpl_hdr;
+ fw_dump->tmpl_hdr_size = hdr->size;
+ fw_dump->version = hdr->version;
+ fw_dump->num_entries = hdr->num_entries;
+ fw_dump->offset = hdr->offset;
+
+ hdr->drv_cap_mask = hdr->cap_mask;
+ fw_dump->cap_mask = hdr->cap_mask;
+
+ fw_dump->use_pex_dma = (hdr->capabilities & BIT_0) ? true : false;
+}
+
+inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index)
{
- u32 dest;
- void __iomem *window_reg;
+ struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
- dest = addr & 0xFFFF0000;
- window_reg = bar0 + QLCNIC_FW_DUMP_REG1;
- writel(dest, window_reg);
- readl(window_reg);
- window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
- *data = readl(window_reg);
+ return hdr->cap_sizes[index];
}
-static void qlcnic_write_dump_reg(u32 addr, void __iomem *bar0, u32 data)
+void qlcnic_82xx_set_sys_info(void *t_hdr, int idx, u32 value)
{
- u32 dest;
- void __iomem *window_reg;
+ struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
- dest = addr & 0xFFFF0000;
- window_reg = bar0 + QLCNIC_FW_DUMP_REG1;
- writel(dest, window_reg);
- readl(window_reg);
- window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
- writel(data, window_reg);
- readl(window_reg);
+ hdr->sys_info[idx] = value;
}
-/* FW dump related functions */
+void qlcnic_82xx_store_cap_mask(void *tmpl_hdr, u32 mask)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr = tmpl_hdr;
+
+ hdr->drv_cap_mask = mask;
+}
+
+inline u32 qlcnic_83xx_get_saved_state(void *t_hdr, u32 index)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+ return hdr->saved_state[index];
+}
+
+inline void qlcnic_83xx_set_saved_state(void *t_hdr, u32 index,
+ u32 value)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+ hdr->saved_state[index] = value;
+}
+
+#define QLCNIC_TEMPLATE_VERSION (0x20001)
+
+void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr;
+
+ hdr = fw_dump->tmpl_hdr;
+ fw_dump->tmpl_hdr_size = hdr->size;
+ fw_dump->version = hdr->version;
+ fw_dump->num_entries = hdr->num_entries;
+ fw_dump->offset = hdr->offset;
+
+ hdr->drv_cap_mask = hdr->cap_mask;
+ fw_dump->cap_mask = hdr->cap_mask;
+
+ fw_dump->use_pex_dma = (fw_dump->version & 0xfffff) >=
+ QLCNIC_TEMPLATE_VERSION;
+}
+
+inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+ return hdr->cap_sizes[index];
+}
+
+void qlcnic_83xx_set_sys_info(void *t_hdr, int idx, u32 value)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+ hdr->sys_info[idx] = value;
+}
+
+void qlcnic_83xx_store_cap_mask(void *tmpl_hdr, u32 mask)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr;
+
+ hdr = tmpl_hdr;
+ hdr->drv_cap_mask = mask;
+}
+
+struct qlcnic_dump_operations {
+ enum qlcnic_minidump_opcode opcode;
+ u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
+ __le32 *);
+};
+
static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
struct qlcnic_dump_entry *entry, __le32 *buffer)
{
int i;
u32 addr, data;
struct __crb *crb = &entry->region.crb;
- void __iomem *base = adapter->ahw->pci_base0;
addr = crb->addr;
for (i = 0; i < crb->no_ops; i++) {
- qlcnic_read_dump_reg(addr, base, &data);
+ data = qlcnic_ind_rd(adapter, addr);
*buffer++ = cpu_to_le32(addr);
*buffer++ = cpu_to_le32(data);
addr += crb->stride;
@@ -194,12 +346,11 @@ static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
struct qlcnic_dump_entry *entry, __le32 *buffer)
{
+ void *hdr = adapter->ahw->fw_dump.tmpl_hdr;
+ struct __ctrl *ctr = &entry->region.ctrl;
int i, k, timeout = 0;
- void __iomem *base = adapter->ahw->pci_base0;
- u32 addr, data;
+ u32 addr, data, temp;
u8 no_ops;
- struct __ctrl *ctr = &entry->region.ctrl;
- struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
addr = ctr->addr;
no_ops = ctr->no_ops;
@@ -211,28 +362,28 @@ static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
continue;
switch (1 << k) {
case QLCNIC_DUMP_WCRB:
- qlcnic_write_dump_reg(addr, base, ctr->val1);
+ qlcnic_ind_wr(adapter, addr, ctr->val1);
break;
case QLCNIC_DUMP_RWCRB:
- qlcnic_read_dump_reg(addr, base, &data);
- qlcnic_write_dump_reg(addr, base, data);
+ data = qlcnic_ind_rd(adapter, addr);
+ qlcnic_ind_wr(adapter, addr, data);
break;
case QLCNIC_DUMP_ANDCRB:
- qlcnic_read_dump_reg(addr, base, &data);
- qlcnic_write_dump_reg(addr, base,
- data & ctr->val2);
+ data = qlcnic_ind_rd(adapter, addr);
+ qlcnic_ind_wr(adapter, addr,
+ (data & ctr->val2));
break;
case QLCNIC_DUMP_ORCRB:
- qlcnic_read_dump_reg(addr, base, &data);
- qlcnic_write_dump_reg(addr, base,
- data | ctr->val3);
+ data = qlcnic_ind_rd(adapter, addr);
+ qlcnic_ind_wr(adapter, addr,
+ (data | ctr->val3));
break;
case QLCNIC_DUMP_POLLCRB:
while (timeout <= ctr->timeout) {
- qlcnic_read_dump_reg(addr, base, &data);
+ data = qlcnic_ind_rd(adapter, addr);
if ((data & ctr->val2) == ctr->val1)
break;
- msleep(1);
+ usleep_range(1000, 2000);
timeout++;
}
if (timeout > ctr->timeout) {
@@ -242,29 +393,42 @@ static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
}
break;
case QLCNIC_DUMP_RD_SAVE:
- if (ctr->index_a)
- addr = t_hdr->saved_state[ctr->index_a];
- qlcnic_read_dump_reg(addr, base, &data);
- t_hdr->saved_state[ctr->index_v] = data;
+ temp = ctr->index_a;
+ if (temp)
+ addr = qlcnic_get_saved_state(adapter,
+ hdr,
+ temp);
+ data = qlcnic_ind_rd(adapter, addr);
+ qlcnic_set_saved_state(adapter, hdr,
+ ctr->index_v, data);
break;
case QLCNIC_DUMP_WRT_SAVED:
- if (ctr->index_v)
- data = t_hdr->saved_state[ctr->index_v];
+ temp = ctr->index_v;
+ if (temp)
+ data = qlcnic_get_saved_state(adapter,
+ hdr,
+ temp);
else
data = ctr->val1;
- if (ctr->index_a)
- addr = t_hdr->saved_state[ctr->index_a];
- qlcnic_write_dump_reg(addr, base, data);
+
+ temp = ctr->index_a;
+ if (temp)
+ addr = qlcnic_get_saved_state(adapter,
+ hdr,
+ temp);
+ qlcnic_ind_wr(adapter, addr, data);
break;
case QLCNIC_DUMP_MOD_SAVE_ST:
- data = t_hdr->saved_state[ctr->index_v];
+ data = qlcnic_get_saved_state(adapter, hdr,
+ ctr->index_v);
data <<= ctr->shl_val;
data >>= ctr->shr_val;
if (ctr->val2)
data &= ctr->val2;
data |= ctr->val3;
data += ctr->val1;
- t_hdr->saved_state[ctr->index_v] = data;
+ qlcnic_set_saved_state(adapter, hdr,
+ ctr->index_v, data);
break;
default:
dev_info(&adapter->pdev->dev,
@@ -283,12 +447,11 @@ static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter,
int loop;
u32 val, data = 0;
struct __mux *mux = &entry->region.mux;
- void __iomem *base = adapter->ahw->pci_base0;
val = mux->val;
for (loop = 0; loop < mux->no_ops; loop++) {
- qlcnic_write_dump_reg(mux->addr, base, val);
- qlcnic_read_dump_reg(mux->read_addr, base, &data);
+ qlcnic_ind_wr(adapter, mux->addr, val);
+ data = qlcnic_ind_rd(adapter, mux->read_addr);
*buffer++ = cpu_to_le32(val);
*buffer++ = cpu_to_le32(data);
val += mux->val_stride;
@@ -301,17 +464,16 @@ static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter,
{
int i, loop;
u32 cnt, addr, data, que_id = 0;
- void __iomem *base = adapter->ahw->pci_base0;
struct __queue *que = &entry->region.que;
addr = que->read_addr;
cnt = que->read_addr_cnt;
for (loop = 0; loop < que->no_ops; loop++) {
- qlcnic_write_dump_reg(que->sel_addr, base, que_id);
+ qlcnic_ind_wr(adapter, que->sel_addr, que_id);
addr = que->read_addr;
for (i = 0; i < cnt; i++) {
- qlcnic_read_dump_reg(addr, base, &data);
+ data = qlcnic_ind_rd(adapter, addr);
*buffer++ = cpu_to_le32(data);
addr += que->read_addr_stride;
}
@@ -343,27 +505,27 @@ static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter,
int i, count = 0;
u32 fl_addr, size, val, lck_val, addr;
struct __mem *rom = &entry->region.mem;
- void __iomem *base = adapter->ahw->pci_base0;
fl_addr = rom->addr;
- size = rom->size/4;
+ size = rom->size / 4;
lock_try:
- lck_val = readl(base + QLCNIC_FLASH_SEM2_LK);
+ lck_val = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK);
if (!lck_val && count < MAX_CTL_CHECK) {
- msleep(10);
+ usleep_range(10000, 11000);
count++;
goto lock_try;
}
- writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID));
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER,
+ adapter->ahw->pci_func);
for (i = 0; i < size; i++) {
addr = fl_addr & 0xFFFF0000;
- qlcnic_write_dump_reg(FLASH_ROM_WINDOW, base, addr);
+ qlcnic_ind_wr(adapter, FLASH_ROM_WINDOW, addr);
addr = LSW(fl_addr) + FLASH_ROM_DATA;
- qlcnic_read_dump_reg(addr, base, &val);
+ val = qlcnic_ind_rd(adapter, addr);
fl_addr += 4;
*buffer++ = cpu_to_le32(val);
}
- readl(base + QLCNIC_FLASH_SEM2_ULK);
+ QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK);
return rom->size;
}
@@ -372,18 +534,17 @@ static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
{
int i;
u32 cnt, val, data, addr;
- void __iomem *base = adapter->ahw->pci_base0;
struct __cache *l1 = &entry->region.cache;
val = l1->init_tag_val;
for (i = 0; i < l1->no_ops; i++) {
- qlcnic_write_dump_reg(l1->addr, base, val);
- qlcnic_write_dump_reg(l1->ctrl_addr, base, LSW(l1->ctrl_val));
+ qlcnic_ind_wr(adapter, l1->addr, val);
+ qlcnic_ind_wr(adapter, l1->ctrl_addr, LSW(l1->ctrl_val));
addr = l1->read_addr;
cnt = l1->read_addr_num;
while (cnt) {
- qlcnic_read_dump_reg(addr, base, &data);
+ data = qlcnic_ind_rd(adapter, addr);
*buffer++ = cpu_to_le32(data);
addr += l1->read_addr_stride;
cnt--;
@@ -399,7 +560,6 @@ static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
int i;
u32 cnt, val, data, addr;
u8 poll_mask, poll_to, time_out = 0;
- void __iomem *base = adapter->ahw->pci_base0;
struct __cache *l2 = &entry->region.cache;
val = l2->init_tag_val;
@@ -407,17 +567,17 @@ static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
poll_to = MSB(MSW(l2->ctrl_val));
for (i = 0; i < l2->no_ops; i++) {
- qlcnic_write_dump_reg(l2->addr, base, val);
+ qlcnic_ind_wr(adapter, l2->addr, val);
if (LSW(l2->ctrl_val))
- qlcnic_write_dump_reg(l2->ctrl_addr, base,
- LSW(l2->ctrl_val));
+ qlcnic_ind_wr(adapter, l2->ctrl_addr,
+ LSW(l2->ctrl_val));
if (!poll_mask)
goto skip_poll;
do {
- qlcnic_read_dump_reg(l2->ctrl_addr, base, &data);
+ data = qlcnic_ind_rd(adapter, l2->ctrl_addr);
if (!(data & poll_mask))
break;
- msleep(1);
+ usleep_range(1000, 2000);
time_out++;
} while (time_out <= poll_to);
@@ -431,7 +591,7 @@ skip_poll:
addr = l2->read_addr;
cnt = l2->read_addr_num;
while (cnt) {
- qlcnic_read_dump_reg(addr, base, &data);
+ data = qlcnic_ind_rd(adapter, addr);
*buffer++ = cpu_to_le32(data);
addr += l2->read_addr_stride;
cnt--;
@@ -441,13 +601,12 @@ skip_poll:
return l2->no_ops * l2->read_addr_num * sizeof(u32);
}
-static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
- struct qlcnic_dump_entry *entry, __le32 *buffer)
+static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter,
+ struct __mem *mem, __le32 *buffer,
+ int *ret)
{
- u32 addr, data, test, ret = 0;
+ u32 addr, data, test;
int i, reg_read;
- struct __mem *mem = &entry->region.mem;
- void __iomem *base = adapter->ahw->pci_base0;
reg_read = mem->size;
addr = mem->addr;
@@ -456,19 +615,19 @@ static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
dev_info(&adapter->pdev->dev,
"Unaligned memory addr:0x%x size:0x%x\n",
addr, reg_read);
- return -EINVAL;
+ *ret = -EINVAL;
+ return 0;
}
mutex_lock(&adapter->ahw->mem_lock);
while (reg_read != 0) {
- qlcnic_write_dump_reg(MIU_TEST_ADDR_LO, base, addr);
- qlcnic_write_dump_reg(MIU_TEST_ADDR_HI, base, 0);
- qlcnic_write_dump_reg(MIU_TEST_CTR, base,
- TA_CTL_ENABLE | TA_CTL_START);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_START_ENABLE);
for (i = 0; i < MAX_CTL_CHECK; i++) {
- qlcnic_read_dump_reg(MIU_TEST_CTR, base, &test);
+ test = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
if (!(test & TA_CTL_BUSY))
break;
}
@@ -476,13 +635,12 @@ static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
if (printk_ratelimit()) {
dev_err(&adapter->pdev->dev,
"failed to read through agent\n");
- ret = -EINVAL;
+ *ret = -EIO;
goto out;
}
}
for (i = 0; i < 4; i++) {
- qlcnic_read_dump_reg(MIU_TEST_READ_DATA[i], base,
- &data);
+ data = qlcnic_ind_rd(adapter, qlcnic_ms_read_data[i]);
*buffer++ = cpu_to_le32(data);
}
addr += 16;
@@ -494,6 +652,179 @@ out:
return mem->size;
}
+/* DMA register base address */
+#define QLC_DMA_REG_BASE_ADDR(dma_no) (0x77320000 + (dma_no * 0x10000))
+
+/* DMA register offsets w.r.t base address */
+#define QLC_DMA_CMD_BUFF_ADDR_LOW 0
+#define QLC_DMA_CMD_BUFF_ADDR_HI 4
+#define QLC_DMA_CMD_STATUS_CTRL 8
+
+static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
+ struct __mem *mem)
+{
+ struct device *dev = &adapter->pdev->dev;
+ u32 dma_no, dma_base_addr, temp_addr;
+ int i, ret, dma_sts;
+ void *tmpl_hdr;
+
+ tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr;
+ dma_no = qlcnic_get_saved_state(adapter, tmpl_hdr,
+ QLC_83XX_DMA_ENGINE_INDEX);
+ dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no);
+
+ temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW;
+ ret = qlcnic_ind_wr(adapter, temp_addr, mem->desc_card_addr);
+ if (ret)
+ return ret;
+
+ temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI;
+ ret = qlcnic_ind_wr(adapter, temp_addr, 0);
+ if (ret)
+ return ret;
+
+ temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
+ ret = qlcnic_ind_wr(adapter, temp_addr, mem->start_dma_cmd);
+ if (ret)
+ return ret;
+
+ /* Wait for DMA to complete */
+ temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
+ for (i = 0; i < 400; i++) {
+ dma_sts = qlcnic_ind_rd(adapter, temp_addr);
+
+ if (dma_sts & BIT_1)
+ usleep_range(250, 500);
+ else
+ break;
+ }
+
+ if (i >= 400) {
+ dev_info(dev, "PEX DMA operation timed out");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
+ struct __mem *mem,
+ __le32 *buffer, int *ret)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ u32 temp, dma_base_addr, size = 0, read_size = 0;
+ struct qlcnic_pex_dma_descriptor *dma_descr;
+ struct device *dev = &adapter->pdev->dev;
+ dma_addr_t dma_phys_addr;
+ void *dma_buffer;
+ void *tmpl_hdr;
+
+ tmpl_hdr = fw_dump->tmpl_hdr;
+
+ /* Check if DMA engine is available */
+ temp = qlcnic_get_saved_state(adapter, tmpl_hdr,
+ QLC_83XX_DMA_ENGINE_INDEX);
+ dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp);
+ temp = qlcnic_ind_rd(adapter,
+ dma_base_addr + QLC_DMA_CMD_STATUS_CTRL);
+
+ if (!(temp & BIT_31)) {
+ dev_info(dev, "%s: DMA engine is not available\n", __func__);
+ *ret = -EIO;
+ return 0;
+ }
+
+ /* Create DMA descriptor */
+ dma_descr = kzalloc(sizeof(struct qlcnic_pex_dma_descriptor),
+ GFP_KERNEL);
+ if (!dma_descr) {
+ *ret = -ENOMEM;
+ return 0;
+ }
+
+ /* dma_desc_cmd 0:15 = 0
+ * dma_desc_cmd 16:19 = mem->dma_desc_cmd 0:3
+ * dma_desc_cmd 20:23 = pci function number
+ * dma_desc_cmd 24:31 = mem->dma_desc_cmd 8:15
+ */
+ dma_phys_addr = fw_dump->phys_addr;
+ dma_buffer = fw_dump->dma_buffer;
+ temp = 0;
+ temp = mem->dma_desc_cmd & 0xff0f;
+ temp |= (adapter->ahw->pci_func & 0xf) << 4;
+ dma_descr->dma_desc_cmd = (temp << 16) & 0xffff0000;
+ dma_descr->dma_bus_addr_low = LSD(dma_phys_addr);
+ dma_descr->dma_bus_addr_high = MSD(dma_phys_addr);
+ dma_descr->src_addr_high = 0;
+
+ /* Collect memory dump using multiple DMA operations if required */
+ while (read_size < mem->size) {
+ if (mem->size - read_size >= QLC_PEX_DMA_READ_SIZE)
+ size = QLC_PEX_DMA_READ_SIZE;
+ else
+ size = mem->size - read_size;
+
+ dma_descr->src_addr_low = mem->addr + read_size;
+ dma_descr->read_data_size = size;
+
+ /* Write DMA descriptor to MS memory */
+ temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16;
+ *ret = qlcnic_ms_mem_write128(adapter, mem->desc_card_addr,
+ (u32 *)dma_descr, temp);
+ if (*ret) {
+ dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n",
+ mem->desc_card_addr);
+ goto free_dma_descr;
+ }
+
+ *ret = qlcnic_start_pex_dma(adapter, mem);
+ if (*ret) {
+ dev_info(dev, "Failed to start PEX DMA operation\n");
+ goto free_dma_descr;
+ }
+
+ memcpy(buffer, dma_buffer, size);
+ buffer += size / 4;
+ read_size += size;
+ }
+
+free_dma_descr:
+ kfree(dma_descr);
+
+ return read_size;
+}
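
The bit layout described in the dma_desc_cmd comment above can be checked with a small worked example: assuming mem->dma_desc_cmd = 0xABCD and PCI function 5 (both values illustrative), the packing yields 0xAB5D0000, i.e. bits 16:19 = 0xD, bits 20:23 = 0x5, bits 24:31 = 0xAB, bits 0:15 = 0. A sketch:

/* Worked example of the dma_desc_cmd packing used above (values illustrative). */
#include <stdio.h>

int main(void)
{
	unsigned int mem_cmd = 0xABCD;   /* hypothetical mem->dma_desc_cmd */
	unsigned int pci_func = 5;       /* hypothetical PCI function      */
	unsigned int temp, cmd;

	temp = mem_cmd & 0xff0f;          /* keep bits 0:3 and 8:15  -> 0xAB0D */
	temp |= (pci_func & 0xf) << 4;    /* insert function number  -> 0xAB5D */
	cmd = (temp << 16) & 0xffff0000;  /* shift into bits 16:31   -> 0xAB5D0000 */

	printf("dma_desc_cmd = 0x%08x\n", cmd);
	return 0;
}
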
+
+static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ struct device *dev = &adapter->pdev->dev;
+ struct __mem *mem = &entry->region.mem;
+ u32 data_size;
+ int ret = 0;
+
+ if (fw_dump->use_pex_dma) {
+ data_size = qlcnic_read_memory_pexdma(adapter, mem, buffer,
+ &ret);
+ if (ret)
+ dev_info(dev,
+ "Failed to read memory dump using PEX DMA: mask[0x%x]\n",
+ entry->hdr.mask);
+ else
+ return data_size;
+ }
+
+ data_size = qlcnic_read_memory_test_agent(adapter, mem, buffer, &ret);
+ if (ret) {
+ dev_info(dev,
+ "Failed to read memory dump using test agent method: mask[0x%x]\n",
+ entry->hdr.mask);
+ return 0;
+ } else {
+ return data_size;
+ }
+}
+
static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
struct qlcnic_dump_entry *entry, __le32 *buffer)
{
@@ -501,90 +832,473 @@ static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
return 0;
}
-static const struct qlcnic_dump_operations fw_dump_ops[] = {
- { QLCNIC_DUMP_NOP, qlcnic_dump_nop },
- { QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb },
- { QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux },
- { QLCNIC_DUMP_QUEUE, qlcnic_dump_que },
- { QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom },
- { QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm },
- { QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl },
- { QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache },
- { QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache },
- { QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache },
- { QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache },
- { QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache },
- { QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache },
- { QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache },
- { QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache },
- { QLCNIC_DUMP_READ_ROM, qlcnic_read_rom },
- { QLCNIC_DUMP_READ_MEM, qlcnic_read_memory },
- { QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl },
- { QLCNIC_DUMP_TLHDR, qlcnic_dump_nop },
- { QLCNIC_DUMP_RDEND, qlcnic_dump_nop },
-};
-
-/* Walk the template and collect dump for each entry in the dump template */
-static int
-qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
- u32 size)
+static int qlcnic_valid_dump_entry(struct device *dev,
+ struct qlcnic_dump_entry *entry, u32 size)
{
int ret = 1;
if (size != entry->hdr.cap_size) {
- dev_info(dev,
- "Invalid dump, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
- entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size);
- dev_info(dev, "Aborting further dump capture\n");
+ dev_err(dev,
+ "Invalid entry, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
+ entry->hdr.type, entry->hdr.mask, size,
+ entry->hdr.cap_size);
ret = 0;
}
return ret;
}
+static u32 qlcnic_read_pollrdmwr(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry,
+ __le32 *buffer)
+{
+ struct __pollrdmwr *poll = &entry->region.pollrdmwr;
+ u32 data, wait_count, poll_wait, temp;
+
+ poll_wait = poll->poll_wait;
+
+ qlcnic_ind_wr(adapter, poll->addr1, poll->val1);
+ wait_count = 0;
+
+ while (wait_count < poll_wait) {
+ data = qlcnic_ind_rd(adapter, poll->addr1);
+ if ((data & poll->poll_mask) != 0)
+ break;
+ wait_count++;
+ }
+
+ if (wait_count == poll_wait) {
+ dev_err(&adapter->pdev->dev,
+ "Timeout exceeded in %s, aborting dump\n",
+ __func__);
+ return 0;
+ }
+
+ data = qlcnic_ind_rd(adapter, poll->addr2) & poll->mod_mask;
+ qlcnic_ind_wr(adapter, poll->addr2, data);
+ qlcnic_ind_wr(adapter, poll->addr1, poll->val2);
+ wait_count = 0;
+
+ while (wait_count < poll_wait) {
+ temp = qlcnic_ind_rd(adapter, poll->addr1);
+ if ((temp & poll->poll_mask) != 0)
+ break;
+ wait_count++;
+ }
+
+ *buffer++ = cpu_to_le32(poll->addr2);
+ *buffer++ = cpu_to_le32(data);
+
+ return 2 * sizeof(u32);
+}
+
+static u32 qlcnic_read_pollrd(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ struct __pollrd *pollrd = &entry->region.pollrd;
+ u32 data, wait_count, poll_wait, sel_val;
+ int i;
+
+ poll_wait = pollrd->poll_wait;
+ sel_val = pollrd->sel_val;
+
+ for (i = 0; i < pollrd->no_ops; i++) {
+ qlcnic_ind_wr(adapter, pollrd->sel_addr, sel_val);
+ wait_count = 0;
+ while (wait_count < poll_wait) {
+ data = qlcnic_ind_rd(adapter, pollrd->sel_addr);
+ if ((data & pollrd->poll_mask) != 0)
+ break;
+ wait_count++;
+ }
+
+ if (wait_count == poll_wait) {
+ dev_err(&adapter->pdev->dev,
+ "Timeout exceeded in %s, aborting dump\n",
+ __func__);
+ return 0;
+ }
+
+ data = qlcnic_ind_rd(adapter, pollrd->read_addr);
+ *buffer++ = cpu_to_le32(sel_val);
+ *buffer++ = cpu_to_le32(data);
+ sel_val += pollrd->sel_val_stride;
+ }
+ return pollrd->no_ops * (2 * sizeof(u32));
+}
+
+static u32 qlcnic_read_mux2(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ struct __mux2 *mux2 = &entry->region.mux2;
+ u32 data;
+ u32 t_sel_val, sel_val1, sel_val2;
+ int i;
+
+ sel_val1 = mux2->sel_val1;
+ sel_val2 = mux2->sel_val2;
+
+ for (i = 0; i < mux2->no_ops; i++) {
+ qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val1);
+ t_sel_val = sel_val1 & mux2->sel_val_mask;
+ qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
+ data = qlcnic_ind_rd(adapter, mux2->read_addr);
+ *buffer++ = cpu_to_le32(t_sel_val);
+ *buffer++ = cpu_to_le32(data);
+ qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val2);
+ t_sel_val = sel_val2 & mux2->sel_val_mask;
+ qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
+ data = qlcnic_ind_rd(adapter, mux2->read_addr);
+ *buffer++ = cpu_to_le32(t_sel_val);
+ *buffer++ = cpu_to_le32(data);
+ sel_val1 += mux2->sel_val_stride;
+ sel_val2 += mux2->sel_val_stride;
+ }
+
+ return mux2->no_ops * (4 * sizeof(u32));
+}
+
+static u32 qlcnic_83xx_dump_rom(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ u32 fl_addr, size;
+ struct __mem *rom = &entry->region.mem;
+
+ fl_addr = rom->addr;
+ size = rom->size / 4;
+
+ if (!qlcnic_83xx_lockless_flash_read32(adapter, fl_addr,
+ (u8 *)buffer, size))
+ return rom->size;
+
+ return 0;
+}
+
+static const struct qlcnic_dump_operations qlcnic_fw_dump_ops[] = {
+ {QLCNIC_DUMP_NOP, qlcnic_dump_nop},
+ {QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
+ {QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
+ {QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
+ {QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom},
+ {QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
+ {QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
+ {QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_READ_ROM, qlcnic_read_rom},
+ {QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
+ {QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
+ {QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
+ {QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
+};
+
+static const struct qlcnic_dump_operations qlcnic_83xx_fw_dump_ops[] = {
+ {QLCNIC_DUMP_NOP, qlcnic_dump_nop},
+ {QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
+ {QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
+ {QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
+ {QLCNIC_DUMP_BRD_CONFIG, qlcnic_83xx_dump_rom},
+ {QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
+ {QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
+ {QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_POLL_RD, qlcnic_read_pollrd},
+ {QLCNIC_READ_MUX2, qlcnic_read_mux2},
+ {QLCNIC_READ_POLLRDMWR, qlcnic_read_pollrdmwr},
+ {QLCNIC_DUMP_READ_ROM, qlcnic_83xx_dump_rom},
+ {QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
+ {QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
+ {QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
+ {QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
+};
+
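+/* Sum the template as 32-bit words, fold the carry back into the low
+ * 32 bits and return the one's complement; a valid template therefore
+ * checksums to zero.
+ */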
+static u32 qlcnic_temp_checksum(u32 *temp_buffer, u32 temp_size)
+{
+	u64 sum = 0;
+	int count = temp_size / sizeof(u32);
+ while (count-- > 0)
+ sum += *temp_buffer++;
+ while (sum >> 32)
+ sum = (sum & 0xFFFFFFFF) + (sum >> 32);
+ return ~sum;
+}
+
+static int qlcnic_fw_flash_get_minidump_temp(struct qlcnic_adapter *adapter,
+ u8 *buffer, u32 size)
+{
+ int ret = 0;
+
+ if (qlcnic_82xx_check(adapter))
+ return -EIO;
+
+ if (qlcnic_83xx_lock_flash(adapter))
+ return -EIO;
+
+ ret = qlcnic_83xx_lockless_flash_read32(adapter,
+ QLC_83XX_MINIDUMP_FLASH,
+ buffer, size / sizeof(u32));
+
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return ret;
+}
+
+static int
+qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_83xx_dump_template_hdr tmp_hdr;
+ u32 size = sizeof(tmp_hdr) / sizeof(u32);
+ int ret = 0;
+
+ if (qlcnic_82xx_check(adapter))
+ return -EIO;
+
+ if (qlcnic_83xx_lock_flash(adapter))
+ return -EIO;
+
+ ret = qlcnic_83xx_lockless_flash_read32(adapter,
+ QLC_83XX_MINIDUMP_FLASH,
+ (u8 *)&tmp_hdr, size);
+
+ qlcnic_83xx_unlock_flash(adapter);
+
+ cmd->rsp.arg[2] = tmp_hdr.size;
+ cmd->rsp.arg[3] = tmp_hdr.version;
+
+ return ret;
+}
+
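+/* Query the firmware for the minidump template size and version; if the
+ * mailbox command fails, read the template header from flash instead and
+ * flag that the full template must also be fetched from flash.
+ */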
+static int qlcnic_fw_get_minidump_temp_size(struct qlcnic_adapter *adapter,
+ u32 *version, u32 *temp_size,
+ u8 *use_flash_temp)
+{
+ int err = 0;
+ struct qlcnic_cmd_args cmd;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TEMP_SIZE))
+ return -ENOMEM;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ if (qlcnic_fw_flash_get_minidump_temp_size(adapter, &cmd)) {
+ qlcnic_free_mbx_args(&cmd);
+ return -EIO;
+ }
+ *use_flash_temp = 1;
+ }
+
+ *temp_size = cmd.rsp.arg[2];
+ *version = cmd.rsp.arg[3];
+ qlcnic_free_mbx_args(&cmd);
+
+ if (!(*temp_size))
+ return -EIO;
+
+ return 0;
+}
+
+static int __qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter,
+ u32 *buffer, u32 temp_size)
+{
+ int err = 0, i;
+ void *tmp_addr;
+ __le32 *tmp_buf;
+ struct qlcnic_cmd_args cmd;
+ dma_addr_t tmp_addr_t = 0;
+
+ tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
+ &tmp_addr_t, GFP_KERNEL);
+ if (!tmp_addr)
+ return -ENOMEM;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) {
+ err = -ENOMEM;
+ goto free_mem;
+ }
+
+ cmd.req.arg[1] = LSD(tmp_addr_t);
+ cmd.req.arg[2] = MSD(tmp_addr_t);
+ cmd.req.arg[3] = temp_size;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ tmp_buf = tmp_addr;
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ for (i = 0; i < temp_size / sizeof(u32); i++)
+ *buffer++ = __le32_to_cpu(*tmp_buf++);
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+
+free_mem:
+ dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);
+
+ return err;
+}
+
+int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw;
+ struct qlcnic_fw_dump *fw_dump;
+ u32 version, csum, *tmp_buf;
+ u8 use_flash_temp = 0;
+ u32 temp_size = 0;
+ void *temp_buffer;
+ int err;
+
+ ahw = adapter->ahw;
+ fw_dump = &ahw->fw_dump;
+ err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size,
+ &use_flash_temp);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Can't get template size %d\n", err);
+ return -EIO;
+ }
+
+ fw_dump->tmpl_hdr = vzalloc(temp_size);
+ if (!fw_dump->tmpl_hdr)
+ return -ENOMEM;
+
+ tmp_buf = (u32 *)fw_dump->tmpl_hdr;
+ if (use_flash_temp)
+ goto flash_temp;
+
+ err = __qlcnic_fw_cmd_get_minidump_temp(adapter, tmp_buf, temp_size);
+
+ if (err) {
+flash_temp:
+ err = qlcnic_fw_flash_get_minidump_temp(adapter, (u8 *)tmp_buf,
+ temp_size);
+
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get minidump template header %d\n",
+ err);
+ vfree(fw_dump->tmpl_hdr);
+ fw_dump->tmpl_hdr = NULL;
+ return -EIO;
+ }
+ }
+
+	csum = qlcnic_temp_checksum(tmp_buf, temp_size);
+
+ if (csum) {
+ dev_err(&adapter->pdev->dev,
+ "Template header checksum validation failed\n");
+ vfree(fw_dump->tmpl_hdr);
+ fw_dump->tmpl_hdr = NULL;
+ return -EIO;
+ }
+
+ qlcnic_cache_tmpl_hdr_values(adapter, fw_dump);
+
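+	/* Pre-allocate a coherent buffer for PEX DMA based capture; if the
+	 * allocation fails, fall back to capturing through register reads.
+	 */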
+ if (fw_dump->use_pex_dma) {
+ fw_dump->dma_buffer = NULL;
+ temp_buffer = dma_alloc_coherent(&adapter->pdev->dev,
+ QLC_PEX_DMA_READ_SIZE,
+ &fw_dump->phys_addr,
+ GFP_KERNEL);
+ if (!temp_buffer)
+ fw_dump->use_pex_dma = false;
+ else
+ fw_dump->dma_buffer = temp_buffer;
+ }
+
+ dev_info(&adapter->pdev->dev,
+ "Default minidump capture mask 0x%x\n",
+ fw_dump->cap_mask);
+
+ qlcnic_enable_fw_dump_state(adapter);
+
+ return 0;
+}
+
int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ static const struct qlcnic_dump_operations *fw_dump_ops;
+ struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
+ u32 entry_offset, dump, no_entries, buf_offset = 0;
+ int i, k, ops_cnt, ops_index, dump_size = 0;
+ struct device *dev = &adapter->pdev->dev;
+ struct qlcnic_hardware_context *ahw;
+ struct qlcnic_dump_entry *entry;
+ void *tmpl_hdr;
+ u32 ocm_window;
__le32 *buffer;
char mesg[64];
char *msg[] = {mesg, NULL};
- int i, k, ops_cnt, ops_index, dump_size = 0;
- u32 entry_offset, dump, no_entries, buf_offset = 0;
- struct qlcnic_dump_entry *entry;
- struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
- struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
+
+ ahw = adapter->ahw;
+ tmpl_hdr = fw_dump->tmpl_hdr;
+
+ /* Return if we don't have firmware dump template header */
+ if (!tmpl_hdr)
+ return -EIO;
+
+ if (!qlcnic_check_fw_dump_state(adapter)) {
+ dev_info(&adapter->pdev->dev, "Dump not enabled\n");
+ return -EIO;
+ }
if (fw_dump->clr) {
dev_info(&adapter->pdev->dev,
"Previous dump not cleared, not capturing dump\n");
return -EIO;
}
+
+ netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n");
/* Calculate the size for dump data area only */
for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
- if (i & tmpl_hdr->drv_cap_mask)
- dump_size += tmpl_hdr->cap_sizes[k];
+ if (i & fw_dump->cap_mask)
+ dump_size += qlcnic_get_cap_size(adapter, tmpl_hdr, k);
+
if (!dump_size)
return -EIO;
fw_dump->data = vzalloc(dump_size);
- if (!fw_dump->data) {
- dev_info(&adapter->pdev->dev,
- "Unable to allocate (%d KB) for fw dump\n",
- dump_size / 1024);
+ if (!fw_dump->data)
return -ENOMEM;
- }
+
buffer = fw_dump->data;
fw_dump->size = dump_size;
- no_entries = tmpl_hdr->num_entries;
- ops_cnt = ARRAY_SIZE(fw_dump_ops);
- entry_offset = tmpl_hdr->offset;
- tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
- tmpl_hdr->sys_info[1] = adapter->fw_version;
+ no_entries = fw_dump->num_entries;
+ entry_offset = fw_dump->offset;
+ qlcnic_set_sys_info(adapter, tmpl_hdr, 0, QLCNIC_DRIVER_VERSION);
+ qlcnic_set_sys_info(adapter, tmpl_hdr, 1, adapter->fw_version);
+
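+	/* Pick the opcode handler table for this adapter family; 83xx also
+	 * records the OCM window and PCI function in the template's saved
+	 * state.
+	 */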
+ if (qlcnic_82xx_check(adapter)) {
+ ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
+ fw_dump_ops = qlcnic_fw_dump_ops;
+ } else {
+ hdr_83xx = tmpl_hdr;
+ ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops);
+ fw_dump_ops = qlcnic_83xx_fw_dump_ops;
+ ocm_window = hdr_83xx->ocm_wnd_reg[ahw->pci_func];
+ hdr_83xx->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
+ hdr_83xx->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
+ }
for (i = 0; i < no_entries; i++) {
- entry = (void *)tmpl_hdr + entry_offset;
- if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
+ entry = tmpl_hdr + entry_offset;
+ if (!(entry->hdr.mask & fw_dump->cap_mask)) {
entry->hdr.flags |= QLCNIC_DUMP_SKIP;
entry_offset += entry->hdr.offset;
continue;
}
+
/* Find the handler for this entry */
ops_index = 0;
while (ops_index < ops_cnt) {
@@ -592,37 +1306,53 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
break;
ops_index++;
}
+
if (ops_index == ops_cnt) {
- dev_info(&adapter->pdev->dev,
- "Invalid entry type %d, exiting dump\n",
+ dev_info(dev, "Skipping unknown entry opcode %d\n",
entry->hdr.type);
- goto error;
+ entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+ entry_offset += entry->hdr.offset;
+ continue;
}
+
/* Collect dump for this entry */
dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
- if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
- dump))
+ if (!qlcnic_valid_dump_entry(dev, entry, dump)) {
entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+ entry_offset += entry->hdr.offset;
+ continue;
+ }
+
buf_offset += entry->hdr.cap_size;
entry_offset += entry->hdr.offset;
buffer = fw_dump->data + buf_offset;
}
- if (dump_size != buf_offset) {
- dev_info(&adapter->pdev->dev,
- "Captured(%d) and expected size(%d) do not match\n",
- buf_offset, dump_size);
- goto error;
- } else {
- fw_dump->clr = 1;
- snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
- adapter->netdev->name);
- dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
- fw_dump->size);
- /* Send a udev event to notify availability of FW dump */
- kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
- return 0;
+
+ fw_dump->clr = 1;
+ snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
+ netdev_info(adapter->netdev,
+ "Dump data %d bytes captured, template header size %d bytes\n",
+ fw_dump->size, fw_dump->tmpl_hdr_size);
+ /* Send a udev event to notify availability of FW dump */
+ kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
+
+ return 0;
+}
+
+void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
+{
+ u32 prev_version, current_version;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
+ struct pci_dev *pdev = adapter->pdev;
+
+ prev_version = adapter->fw_version;
+ current_version = qlcnic_83xx_get_fw_version(adapter);
+
+ if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
+		vfree(fw_dump->tmpl_hdr);
+ if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
+ dev_info(&pdev->dev, "Supports FW dump capability\n");
}
-error:
- vfree(fw_dump->data);
- return -EINVAL;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
new file mode 100644
index 00000000000..4677b2edccc
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -0,0 +1,276 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#ifndef _QLCNIC_83XX_SRIOV_H_
+#define _QLCNIC_83XX_SRIOV_H_
+
+#include "qlcnic.h"
+#include <linux/types.h>
+#include <linux/pci.h>
+
+extern const u32 qlcnic_83xx_reg_tbl[];
+extern const u32 qlcnic_83xx_ext_reg_tbl[];
+
+struct qlcnic_bc_payload {
+ u64 payload[126];
+};
+
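+/* Header carried with every fragment of a PF<->VF back-channel message;
+ * the layout differs only in byte order between little- and big-endian
+ * builds.
+ */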
+struct qlcnic_bc_hdr {
+#if defined(__LITTLE_ENDIAN)
+ u8 version;
+ u8 msg_type:4;
+ u8 rsvd1:3;
+ u8 op_type:1;
+ u8 num_cmds;
+ u8 num_frags;
+ u8 frag_num;
+ u8 cmd_op;
+ u16 seq_id;
+ u64 rsvd3;
+#elif defined(__BIG_ENDIAN)
+ u8 num_frags;
+ u8 num_cmds;
+ u8 op_type:1;
+ u8 rsvd1:3;
+ u8 msg_type:4;
+ u8 version;
+ u16 seq_id;
+ u8 cmd_op;
+ u8 frag_num;
+ u64 rsvd3;
+#endif
+};
+
+enum qlcnic_bc_commands {
+ QLCNIC_BC_CMD_CHANNEL_INIT = 0x0,
+ QLCNIC_BC_CMD_CHANNEL_TERM = 0x1,
+ QLCNIC_BC_CMD_GET_ACL = 0x2,
+ QLCNIC_BC_CMD_CFG_GUEST_VLAN = 0x3,
+};
+
+#define QLCNIC_83XX_SRIOV_VF_MAX_MAC 2
+#define QLC_BC_CMD 1
+
+struct qlcnic_trans_list {
+ /* Lock for manipulating list */
+ spinlock_t lock;
+ struct list_head wait_list;
+ int count;
+};
+
+enum qlcnic_trans_state {
+ QLC_INIT = 0,
+ QLC_WAIT_FOR_CHANNEL_FREE,
+ QLC_WAIT_FOR_RESP,
+ QLC_ABORT,
+ QLC_END,
+};
+
+struct qlcnic_bc_trans {
+ u8 func_id;
+ u8 active;
+ u8 curr_rsp_frag;
+ u8 curr_req_frag;
+ u16 cmd_id;
+ u16 req_pay_size;
+ u16 rsp_pay_size;
+ u32 trans_id;
+ enum qlcnic_trans_state trans_state;
+ struct list_head list;
+ struct qlcnic_bc_hdr *req_hdr;
+ struct qlcnic_bc_hdr *rsp_hdr;
+ struct qlcnic_bc_payload *req_pay;
+ struct qlcnic_bc_payload *rsp_pay;
+ struct completion resp_cmpl;
+ struct qlcnic_vf_info *vf;
+};
+
+enum qlcnic_vf_state {
+ QLC_BC_VF_SEND = 0,
+ QLC_BC_VF_RECV,
+ QLC_BC_VF_CHANNEL,
+ QLC_BC_VF_STATE,
+ QLC_BC_VF_FLR,
+ QLC_BC_VF_SOFT_FLR,
+};
+
+enum qlcnic_vlan_mode {
+ QLC_NO_VLAN_MODE = 0,
+ QLC_PVID_MODE,
+ QLC_GUEST_VLAN_MODE,
+};
+
+struct qlcnic_resources {
+ u16 num_tx_mac_filters;
+ u16 num_rx_ucast_mac_filters;
+ u16 num_rx_mcast_mac_filters;
+
+ u16 num_txvlan_keys;
+
+ u16 num_rx_queues;
+ u16 num_tx_queues;
+
+ u16 num_rx_buf_rings;
+ u16 num_rx_status_rings;
+
+ u16 num_destip;
+ u32 num_lro_flows_supported;
+ u16 max_local_ipv6_addrs;
+ u16 max_remote_ipv6_addrs;
+};
+
+struct qlcnic_vport {
+ u16 handle;
+ u16 max_tx_bw;
+ u16 min_tx_bw;
+ u16 pvid;
+ u8 vlan_mode;
+ u8 qos;
+ bool spoofchk;
+ u8 mac[6];
+};
+
+struct qlcnic_vf_info {
+ u8 pci_func;
+ u16 rx_ctx_id;
+ u16 tx_ctx_id;
+ u16 *sriov_vlans;
+ int num_vlan;
+ unsigned long state;
+ struct completion ch_free_cmpl;
+ struct work_struct trans_work;
+ struct work_struct flr_work;
+ /* It synchronizes commands sent from VF */
+ struct mutex send_cmd_lock;
+ struct qlcnic_bc_trans *send_cmd;
+ struct qlcnic_bc_trans *flr_trans;
+ struct qlcnic_trans_list rcv_act;
+ struct qlcnic_trans_list rcv_pend;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_vport *vp;
+ spinlock_t vlan_list_lock; /* Lock for VLAN list */
+};
+
+struct qlcnic_async_work_list {
+ struct list_head list;
+ struct work_struct work;
+ void *ptr;
+ struct qlcnic_cmd_args *cmd;
+};
+
+struct qlcnic_back_channel {
+ u16 trans_counter;
+ struct workqueue_struct *bc_trans_wq;
+ struct workqueue_struct *bc_async_wq;
+ struct workqueue_struct *bc_flr_wq;
+ struct list_head async_list;
+};
+
+struct qlcnic_sriov {
+ u16 vp_handle;
+ u8 num_vfs;
+ u8 any_vlan;
+ u8 vlan_mode;
+ u16 num_allowed_vlans;
+ u16 *allowed_vlans;
+ u16 vlan;
+ struct qlcnic_resources ff_max;
+ struct qlcnic_back_channel bc;
+ struct qlcnic_vf_info *vf_info;
+};
+
+int qlcnic_sriov_init(struct qlcnic_adapter *, int);
+void qlcnic_sriov_cleanup(struct qlcnic_adapter *);
+void __qlcnic_sriov_cleanup(struct qlcnic_adapter *);
+void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *);
+int qlcnic_sriov_vf_init(struct qlcnic_adapter *, int);
+void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *);
+int qlcnic_sriov_func_to_index(struct qlcnic_adapter *, u8);
+void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *, u32);
+int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *, u8);
+void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *);
+void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *);
+int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *, struct qlcnic_vf_info *,
+ struct qlcnic_bc_trans *);
+int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *,
+ struct qlcnic_info *, u16);
+int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *, u16, u8);
+void qlcnic_sriov_free_vlans(struct qlcnic_adapter *);
+void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *);
+bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *);
+void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *,
+ struct qlcnic_vf_info *, u16);
+void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *,
+ struct qlcnic_vf_info *, u16);
+
+static inline bool qlcnic_sriov_enable_check(struct qlcnic_adapter *adapter)
+{
+ return test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state) ? true : false;
+}
+
+#ifdef CONFIG_QLCNIC_SRIOV
+void qlcnic_sriov_pf_process_bc_cmd(struct qlcnic_adapter *,
+ struct qlcnic_bc_trans *,
+ struct qlcnic_cmd_args *);
+void qlcnic_sriov_pf_disable(struct qlcnic_adapter *);
+void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *);
+int qlcnic_pci_sriov_configure(struct pci_dev *, int);
+void qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *, u32 *);
+void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *, struct qlcnic_vf_info *);
+bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *,
+ struct qlcnic_bc_trans *,
+ struct qlcnic_vf_info *);
+void qlcnic_sriov_pf_reset(struct qlcnic_adapter *);
+int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *);
+int qlcnic_sriov_set_vf_mac(struct net_device *, int, u8 *);
+int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int, int);
+int qlcnic_sriov_get_vf_config(struct net_device *, int,
+ struct ifla_vf_info *);
+int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8);
+int qlcnic_sriov_set_vf_spoofchk(struct net_device *, int, bool);
+#else
+static inline void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter) {}
+static inline void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *adapter) {}
+static inline void
+qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id) {}
+static inline void
+qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id) {}
+static inline void
+qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id) {}
+static inline void
+qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id) {}
+static inline void
+qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *adapter, u32 *int_id)
+{}
+static inline void
+qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *adapter, u32 *int_id)
+{}
+static inline void
+qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *adapter, u32 *int_id)
+{}
+static inline void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf) {}
+static inline bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans,
+ struct qlcnic_vf_info *vf)
+{ return false; }
+static inline void qlcnic_sriov_pf_reset(struct qlcnic_adapter *adapter) {}
+static inline int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *adapter)
+{ return 0; }
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
new file mode 100644
index 00000000000..1659c804f1d
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -0,0 +1,2209 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic_sriov.h"
+#include "qlcnic.h"
+#include "qlcnic_83xx_hw.h"
+#include <linux/types.h>
+
+#define QLC_BC_COMMAND 0
+#define QLC_BC_RESPONSE 1
+
+#define QLC_MBOX_RESP_TIMEOUT (10 * HZ)
+#define QLC_MBOX_CH_FREE_TIMEOUT (10 * HZ)
+
+#define QLC_BC_MSG 0
+#define QLC_BC_CFREE 1
+#define QLC_BC_FLR 2
+#define QLC_BC_HDR_SZ 16
+#define QLC_BC_PAYLOAD_SZ (1024 - QLC_BC_HDR_SZ)
+
+#define QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF 2048
+#define QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF 512
+
+#define QLC_83XX_VF_RESET_FAIL_THRESH 8
+#define QLC_BC_CMD_MAX_RETRY_CNT 5
+
+static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
+static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
+static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
+static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
+static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
+static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *,
+ struct qlcnic_cmd_args *);
+static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
+static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
+static int qlcnic_sriov_vf_shutdown(struct pci_dev *);
+static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
+static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *,
+ struct qlcnic_cmd_args *);
+
+static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
+ .read_crb = qlcnic_83xx_read_crb,
+ .write_crb = qlcnic_83xx_write_crb,
+ .read_reg = qlcnic_83xx_rd_reg_indirect,
+ .write_reg = qlcnic_83xx_wrt_reg_indirect,
+ .get_mac_address = qlcnic_83xx_get_mac_address,
+ .setup_intr = qlcnic_83xx_setup_intr,
+ .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args,
+ .mbx_cmd = qlcnic_sriov_issue_cmd,
+ .get_func_no = qlcnic_83xx_get_func_no,
+ .api_lock = qlcnic_83xx_cam_lock,
+ .api_unlock = qlcnic_83xx_cam_unlock,
+ .process_lb_rcv_ring_diag = qlcnic_83xx_process_rcv_ring_diag,
+ .create_rx_ctx = qlcnic_83xx_create_rx_ctx,
+ .create_tx_ctx = qlcnic_83xx_create_tx_ctx,
+ .del_rx_ctx = qlcnic_83xx_del_rx_ctx,
+ .del_tx_ctx = qlcnic_83xx_del_tx_ctx,
+ .setup_link_event = qlcnic_83xx_setup_link_event,
+ .get_nic_info = qlcnic_83xx_get_nic_info,
+ .get_pci_info = qlcnic_83xx_get_pci_info,
+ .set_nic_info = qlcnic_83xx_set_nic_info,
+ .change_macvlan = qlcnic_83xx_sre_macaddr_change,
+ .napi_enable = qlcnic_83xx_napi_enable,
+ .napi_disable = qlcnic_83xx_napi_disable,
+ .config_intr_coal = qlcnic_83xx_config_intr_coal,
+ .config_rss = qlcnic_83xx_config_rss,
+ .config_hw_lro = qlcnic_83xx_config_hw_lro,
+ .config_promisc_mode = qlcnic_83xx_nic_set_promisc,
+ .change_l2_filter = qlcnic_83xx_change_l2_filter,
+ .get_board_info = qlcnic_83xx_get_port_info,
+ .free_mac_list = qlcnic_sriov_vf_free_mac_list,
+ .enable_sds_intr = qlcnic_83xx_enable_sds_intr,
+ .disable_sds_intr = qlcnic_83xx_disable_sds_intr,
+};
+
+static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
+ .config_bridged_mode = qlcnic_config_bridged_mode,
+ .config_led = qlcnic_config_led,
+ .cancel_idc_work = qlcnic_sriov_vf_cancel_fw_work,
+ .napi_add = qlcnic_83xx_napi_add,
+ .napi_del = qlcnic_83xx_napi_del,
+ .shutdown = qlcnic_sriov_vf_shutdown,
+ .resume = qlcnic_sriov_vf_resume,
+ .config_ipaddr = qlcnic_83xx_config_ipaddr,
+ .clear_legacy_intr = qlcnic_83xx_clear_legacy_intr,
+};
+
+static const struct qlcnic_mailbox_metadata qlcnic_sriov_bc_mbx_tbl[] = {
+ {QLCNIC_BC_CMD_CHANNEL_INIT, 2, 2},
+ {QLCNIC_BC_CMD_CHANNEL_TERM, 2, 2},
+ {QLCNIC_BC_CMD_GET_ACL, 3, 14},
+ {QLCNIC_BC_CMD_CFG_GUEST_VLAN, 2, 2},
+};
+
+static inline bool qlcnic_sriov_bc_msg_check(u32 val)
+{
+ return (val & (1 << QLC_BC_MSG)) ? true : false;
+}
+
+static inline bool qlcnic_sriov_channel_free_check(u32 val)
+{
+ return (val & (1 << QLC_BC_CFREE)) ? true : false;
+}
+
+static inline bool qlcnic_sriov_flr_check(u32 val)
+{
+ return (val & (1 << QLC_BC_FLR)) ? true : false;
+}
+
+static inline u8 qlcnic_sriov_target_func_id(u32 val)
+{
+ return (val >> 4) & 0xff;
+}
+
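+/* Derive a VF's routing ID from the PF's devfn plus the VF offset and
+ * stride advertised in the SR-IOV extended capability.
+ */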
+static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id)
+{
+ struct pci_dev *dev = adapter->pdev;
+ int pos;
+ u16 stride, offset;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ return 0;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
+
+ return (dev->devfn + offset + stride * vf_id) & 0xff;
+}
+
+int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
+{
+ struct qlcnic_sriov *sriov;
+ struct qlcnic_back_channel *bc;
+ struct workqueue_struct *wq;
+ struct qlcnic_vport *vp;
+ struct qlcnic_vf_info *vf;
+ int err, i;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return -EIO;
+
+ sriov = kzalloc(sizeof(struct qlcnic_sriov), GFP_KERNEL);
+ if (!sriov)
+ return -ENOMEM;
+
+ adapter->ahw->sriov = sriov;
+ sriov->num_vfs = num_vfs;
+ bc = &sriov->bc;
+ sriov->vf_info = kzalloc(sizeof(struct qlcnic_vf_info) *
+ num_vfs, GFP_KERNEL);
+ if (!sriov->vf_info) {
+ err = -ENOMEM;
+ goto qlcnic_free_sriov;
+ }
+
+ wq = create_singlethread_workqueue("bc-trans");
+ if (wq == NULL) {
+ err = -ENOMEM;
+ dev_err(&adapter->pdev->dev,
+ "Cannot create bc-trans workqueue\n");
+ goto qlcnic_free_vf_info;
+ }
+
+ bc->bc_trans_wq = wq;
+
+ wq = create_singlethread_workqueue("async");
+ if (wq == NULL) {
+ err = -ENOMEM;
+ dev_err(&adapter->pdev->dev, "Cannot create async workqueue\n");
+ goto qlcnic_destroy_trans_wq;
+ }
+
+ bc->bc_async_wq = wq;
+ INIT_LIST_HEAD(&bc->async_list);
+
+ for (i = 0; i < num_vfs; i++) {
+ vf = &sriov->vf_info[i];
+ vf->adapter = adapter;
+ vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
+ mutex_init(&vf->send_cmd_lock);
+ spin_lock_init(&vf->vlan_list_lock);
+ INIT_LIST_HEAD(&vf->rcv_act.wait_list);
+ INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
+ spin_lock_init(&vf->rcv_act.lock);
+ spin_lock_init(&vf->rcv_pend.lock);
+ init_completion(&vf->ch_free_cmpl);
+
+ INIT_WORK(&vf->trans_work, qlcnic_sriov_process_bc_cmd);
+
+ if (qlcnic_sriov_pf_check(adapter)) {
+ vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL);
+ if (!vp) {
+ err = -ENOMEM;
+ goto qlcnic_destroy_async_wq;
+ }
+ sriov->vf_info[i].vp = vp;
+ vp->vlan_mode = QLC_GUEST_VLAN_MODE;
+ vp->max_tx_bw = MAX_BW;
+ vp->min_tx_bw = MIN_BW;
+ vp->spoofchk = false;
+ random_ether_addr(vp->mac);
+ dev_info(&adapter->pdev->dev,
+ "MAC Address %pM is configured for VF %d\n",
+ vp->mac, i);
+ }
+ }
+
+ return 0;
+
+qlcnic_destroy_async_wq:
+ destroy_workqueue(bc->bc_async_wq);
+
+qlcnic_destroy_trans_wq:
+ destroy_workqueue(bc->bc_trans_wq);
+
+qlcnic_free_vf_info:
+ kfree(sriov->vf_info);
+
+qlcnic_free_sriov:
+ kfree(adapter->ahw->sriov);
+ return err;
+}
+
+void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *t_list)
+{
+ struct qlcnic_bc_trans *trans;
+ struct qlcnic_cmd_args cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&t_list->lock, flags);
+
+ while (!list_empty(&t_list->wait_list)) {
+ trans = list_first_entry(&t_list->wait_list,
+ struct qlcnic_bc_trans, list);
+ list_del(&trans->list);
+ t_list->count--;
+ cmd.req.arg = (u32 *)trans->req_pay;
+ cmd.rsp.arg = (u32 *)trans->rsp_pay;
+ qlcnic_free_mbx_args(&cmd);
+ qlcnic_sriov_cleanup_transaction(trans);
+ }
+
+ spin_unlock_irqrestore(&t_list->lock, flags);
+}
+
+void __qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_back_channel *bc = &sriov->bc;
+ struct qlcnic_vf_info *vf;
+ int i;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return;
+
+ qlcnic_sriov_cleanup_async_list(bc);
+ destroy_workqueue(bc->bc_async_wq);
+
+ for (i = 0; i < sriov->num_vfs; i++) {
+ vf = &sriov->vf_info[i];
+ qlcnic_sriov_cleanup_list(&vf->rcv_pend);
+ cancel_work_sync(&vf->trans_work);
+ qlcnic_sriov_cleanup_list(&vf->rcv_act);
+ }
+
+ destroy_workqueue(bc->bc_trans_wq);
+
+ for (i = 0; i < sriov->num_vfs; i++)
+ kfree(sriov->vf_info[i].vp);
+
+ kfree(sriov->vf_info);
+ kfree(adapter->ahw->sriov);
+}
+
+static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter)
+{
+ qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
+ qlcnic_sriov_cfg_bc_intr(adapter, 0);
+ __qlcnic_sriov_cleanup(adapter);
+}
+
+void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
+{
+ if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
+ return;
+
+ qlcnic_sriov_free_vlans(adapter);
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_sriov_pf_cleanup(adapter);
+
+ if (qlcnic_sriov_vf_check(adapter))
+ qlcnic_sriov_vf_cleanup(adapter);
+}
+
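+/* Post a single back-channel fragment (header plus payload words) through
+ * the 83xx mailbox on behalf of the given PCI function and wait for the
+ * mailbox completion.
+ */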
+static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
+ u32 *pay, u8 pci_func, u8 size)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
+ struct qlcnic_cmd_args cmd;
+ unsigned long timeout;
+ int err;
+
+ memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
+ cmd.hdr = hdr;
+ cmd.pay = pay;
+ cmd.pay_size = size;
+ cmd.func_num = pci_func;
+ cmd.op_type = QLC_83XX_MBX_POST_BC_OP;
+ cmd.cmd_op = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;
+
+ err = mbx->ops->enqueue_cmd(adapter, &cmd, &timeout);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Mailbox not available, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, cmd.cmd_op, cmd.type, ahw->pci_func,
+ ahw->op_mode);
+ return err;
+ }
+
+ if (!wait_for_completion_timeout(&cmd.completion, timeout)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, cmd.cmd_op, cmd.type, ahw->pci_func,
+ ahw->op_mode);
+ flush_workqueue(mbx->work_q);
+ }
+
+ return cmd.rsp_opcode;
+}
+
+static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter)
+{
+ adapter->num_rxd = QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
+ adapter->num_jumbo_rxd = QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ adapter->num_txd = MAX_CMD_DESCRIPTORS;
+ adapter->max_rds_rings = MAX_RDS_RINGS;
+}
+
+int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info, u16 vport_id)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct qlcnic_cmd_args cmd;
+ int err;
+ u32 status;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = vport_id << 16 | 0x1;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get vport info, err=%d\n", err);
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+ }
+
+ status = cmd.rsp.arg[2] & 0xffff;
+ if (status & BIT_0)
+ npar_info->min_tx_bw = MSW(cmd.rsp.arg[2]);
+ if (status & BIT_1)
+ npar_info->max_tx_bw = LSW(cmd.rsp.arg[3]);
+ if (status & BIT_2)
+ npar_info->max_tx_ques = MSW(cmd.rsp.arg[3]);
+ if (status & BIT_3)
+ npar_info->max_tx_mac_filters = LSW(cmd.rsp.arg[4]);
+ if (status & BIT_4)
+ npar_info->max_rx_mcast_mac_filters = MSW(cmd.rsp.arg[4]);
+ if (status & BIT_5)
+ npar_info->max_rx_ucast_mac_filters = LSW(cmd.rsp.arg[5]);
+ if (status & BIT_6)
+ npar_info->max_rx_ip_addr = MSW(cmd.rsp.arg[5]);
+ if (status & BIT_7)
+ npar_info->max_rx_lro_flow = LSW(cmd.rsp.arg[6]);
+ if (status & BIT_8)
+ npar_info->max_rx_status_rings = MSW(cmd.rsp.arg[6]);
+ if (status & BIT_9)
+ npar_info->max_rx_buf_rings = LSW(cmd.rsp.arg[7]);
+
+ npar_info->max_rx_ques = MSW(cmd.rsp.arg[7]);
+ npar_info->max_tx_vlan_keys = LSW(cmd.rsp.arg[8]);
+ npar_info->max_local_ipv6_addrs = MSW(cmd.rsp.arg[8]);
+ npar_info->max_remote_ipv6_addrs = LSW(cmd.rsp.arg[9]);
+
+ dev_info(dev, "\n\tmin_tx_bw: %d, max_tx_bw: %d max_tx_ques: %d,\n"
+ "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
+ "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
+ "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
+ "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
+ "\tlocal_ipv6_addr: %d, remote_ipv6_addr: %d\n",
+ npar_info->min_tx_bw, npar_info->max_tx_bw,
+ npar_info->max_tx_ques, npar_info->max_tx_mac_filters,
+ npar_info->max_rx_mcast_mac_filters,
+ npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr,
+ npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings,
+ npar_info->max_rx_buf_rings, npar_info->max_rx_ques,
+ npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs,
+ npar_info->max_remote_ipv6_addrs);
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ adapter->rx_pvid = MSW(cmd->rsp.arg[1]) & 0xffff;
+ adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
+ return 0;
+}
+
+static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ int i, num_vlans;
+ u16 *vlans;
+
+ if (sriov->allowed_vlans)
+ return 0;
+
+ sriov->any_vlan = cmd->rsp.arg[2] & 0xf;
+ sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
+ dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
+ sriov->num_allowed_vlans);
+
+ qlcnic_sriov_alloc_vlans(adapter);
+
+ if (!sriov->any_vlan)
+ return 0;
+
+ num_vlans = sriov->num_allowed_vlans;
+ sriov->allowed_vlans = kzalloc(sizeof(u16) * num_vlans, GFP_KERNEL);
+ if (!sriov->allowed_vlans)
+ return -ENOMEM;
+
+ vlans = (u16 *)&cmd->rsp.arg[3];
+ for (i = 0; i < num_vlans; i++)
+ sriov->allowed_vlans[i] = vlans[i];
+
+ return 0;
+}
+
+static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_cmd_args cmd;
+ int ret = 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+ ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret) {
+ dev_err(&adapter->pdev->dev, "Failed to get ACL, err=%d\n",
+ ret);
+ } else {
+ sriov->vlan_mode = cmd.rsp.arg[1] & 0x3;
+ switch (sriov->vlan_mode) {
+ case QLC_GUEST_VLAN_MODE:
+ ret = qlcnic_sriov_set_guest_vlan_mode(adapter, &cmd);
+ break;
+ case QLC_PVID_MODE:
+ ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd);
+ break;
+ }
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+ return ret;
+}
+
+static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_info nic_info;
+ int err;
+
+ err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0);
+ if (err)
+ return err;
+
+ ahw->max_mc_count = nic_info.max_rx_mcast_mac_filters;
+
+ err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
+ if (err)
+ return -EIO;
+
+ if (qlcnic_83xx_get_port_info(adapter))
+ return -EIO;
+
+ qlcnic_sriov_vf_cfg_buff_desc(adapter);
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+ dev_info(&adapter->pdev->dev, "HAL Version: %d\n",
+ adapter->ahw->fw_hal_version);
+
+ ahw->physical_port = (u8) nic_info.phys_port;
+ ahw->switch_mode = nic_info.switch_mode;
+ ahw->max_mtu = nic_info.max_mtu;
+ ahw->op_mode = nic_info.op_mode;
+ ahw->capabilities = nic_info.capabilities;
+ return 0;
+}
+
+static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
+ int pci_using_dac)
+{
+ int err;
+
+ adapter->flags |= QLCNIC_VLAN_FILTERING;
+ adapter->ahw->total_nic_func = 1;
+ INIT_LIST_HEAD(&adapter->vf_mc_list);
+ if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
+ dev_warn(&adapter->pdev->dev,
+ "Device does not support MSI interrupts\n");
+
+ /* compute and set default and max tx/sds rings */
+ qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
+ qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);
+
+ err = qlcnic_setup_intr(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
+ goto err_out_disable_msi;
+ }
+
+ err = qlcnic_83xx_setup_mbx_intr(adapter);
+ if (err)
+ goto err_out_disable_msi;
+
+ err = qlcnic_sriov_init(adapter, 1);
+ if (err)
+ goto err_out_disable_mbx_intr;
+
+ err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
+ if (err)
+ goto err_out_cleanup_sriov;
+
+ err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
+ if (err)
+ goto err_out_disable_bc_intr;
+
+ err = qlcnic_sriov_vf_init_driver(adapter);
+ if (err)
+ goto err_out_send_channel_term;
+
+ err = qlcnic_sriov_get_vf_acl(adapter);
+ if (err)
+ goto err_out_send_channel_term;
+
+ err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
+ if (err)
+ goto err_out_send_channel_term;
+
+ pci_set_drvdata(adapter->pdev, adapter);
+ dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
+ adapter->netdev->name);
+
+ qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
+ adapter->ahw->idc.delay);
+ return 0;
+
+err_out_send_channel_term:
+ qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
+
+err_out_disable_bc_intr:
+ qlcnic_sriov_cfg_bc_intr(adapter, 0);
+
+err_out_cleanup_sriov:
+ __qlcnic_sriov_cleanup(adapter);
+
+err_out_disable_mbx_intr:
+ qlcnic_83xx_free_mbx_intr(adapter);
+
+err_out_disable_msi:
+ qlcnic_teardown_intr(adapter);
+ return err;
+}
+
+static int qlcnic_sriov_check_dev_ready(struct qlcnic_adapter *adapter)
+{
+ u32 state;
+
+ do {
+ msleep(20);
+ if (++adapter->fw_fail_cnt > QLC_BC_CMD_MAX_RETRY_CNT)
+ return -EIO;
+ state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+ } while (state != QLC_83XX_IDC_DEV_READY);
+
+ return 0;
+}
+
+int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err;
+
+ set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status);
+ ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
+ ahw->reset_context = 0;
+ adapter->fw_fail_cnt = 0;
+ ahw->msix_supported = 1;
+ adapter->need_fw_reset = 0;
+ adapter->flags |= QLCNIC_TX_INTR_SHARED;
+
+ err = qlcnic_sriov_check_dev_ready(adapter);
+ if (err)
+ return err;
+
+ err = qlcnic_sriov_setup_vf(adapter, pci_using_dac);
+ if (err)
+ return err;
+
+ if (qlcnic_read_mac_addr(adapter))
+ dev_warn(&adapter->pdev->dev, "failed to read mac addr\n");
+
+ INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return 0;
+}
+
+void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ ahw->op_mode = QLCNIC_SRIOV_VF_FUNC;
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d Non Privileged SRIOV function\n",
+ ahw->fw_hal_version);
+ adapter->nic_ops = &qlcnic_sriov_vf_ops;
+ set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
+ return;
+}
+
+void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *ahw)
+{
+ ahw->hw_ops = &qlcnic_sriov_vf_hw_ops;
+ ahw->reg_tbl = (u32 *)qlcnic_83xx_reg_tbl;
+ ahw->ext_reg_tbl = (u32 *)qlcnic_83xx_ext_reg_tbl;
+}
+
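+/* Return the payload size of fragment curr_frag: a full QLC_BC_PAYLOAD_SZ
+ * while the payload still fills the fragment, the remainder for the last
+ * fragment.
+ */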
+static u32 qlcnic_sriov_get_bc_paysize(u32 real_pay_size, u8 curr_frag)
+{
+ u32 pay_size;
+
+ pay_size = real_pay_size / ((curr_frag + 1) * QLC_BC_PAYLOAD_SZ);
+
+ if (pay_size)
+ pay_size = QLC_BC_PAYLOAD_SZ;
+ else
+ pay_size = real_pay_size % QLC_BC_PAYLOAD_SZ;
+
+ return pay_size;
+}
+
+int qlcnic_sriov_func_to_index(struct qlcnic_adapter *adapter, u8 pci_func)
+{
+ struct qlcnic_vf_info *vf_info = adapter->ahw->sriov->vf_info;
+ u8 i;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ return 0;
+
+ for (i = 0; i < adapter->ahw->sriov->num_vfs; i++) {
+ if (vf_info[i].pci_func == pci_func)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static inline int qlcnic_sriov_alloc_bc_trans(struct qlcnic_bc_trans **trans)
+{
+ *trans = kzalloc(sizeof(struct qlcnic_bc_trans), GFP_ATOMIC);
+ if (!*trans)
+ return -ENOMEM;
+
+ init_completion(&(*trans)->resp_cmpl);
+ return 0;
+}
+
+static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr **hdr,
+ u32 size)
+{
+ *hdr = kzalloc(sizeof(struct qlcnic_bc_hdr) * size, GFP_ATOMIC);
+ if (!*hdr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
+{
+ const struct qlcnic_mailbox_metadata *mbx_tbl;
+ int i, size;
+
+ mbx_tbl = qlcnic_sriov_bc_mbx_tbl;
+ size = ARRAY_SIZE(qlcnic_sriov_bc_mbx_tbl);
+
+ for (i = 0; i < size; i++) {
+ if (type == mbx_tbl[i].cmd) {
+ mbx->op_type = QLC_BC_CMD;
+ mbx->req.num = mbx_tbl[i].in_args;
+ mbx->rsp.num = mbx_tbl[i].out_args;
+ mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
+ GFP_ATOMIC);
+ if (!mbx->req.arg)
+ return -ENOMEM;
+ mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32),
+ GFP_ATOMIC);
+ if (!mbx->rsp.arg) {
+ kfree(mbx->req.arg);
+ mbx->req.arg = NULL;
+ return -ENOMEM;
+ }
+ memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
+ memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
+ mbx->req.arg[0] = (type | (mbx->req.num << 16) |
+ (3 << 29));
+ mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
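+/* Split a command into back-channel fragments: size the request and
+ * response payloads from the mailbox argument counts, allocate one header
+ * per fragment and stamp each with the sequence id, opcode and fragment
+ * number. For responses, the transaction's payload buffers are reused.
+ */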
+static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd,
+ u16 seq, u8 msg_type)
+{
+ struct qlcnic_bc_hdr *hdr;
+ int i;
+ u32 num_regs, bc_pay_sz;
+ u16 remainder;
+ u8 cmd_op, num_frags, t_num_frags;
+
+ bc_pay_sz = QLC_BC_PAYLOAD_SZ;
+ if (msg_type == QLC_BC_COMMAND) {
+ trans->req_pay = (struct qlcnic_bc_payload *)cmd->req.arg;
+ trans->rsp_pay = (struct qlcnic_bc_payload *)cmd->rsp.arg;
+ num_regs = cmd->req.num;
+ trans->req_pay_size = (num_regs * 4);
+ num_regs = cmd->rsp.num;
+ trans->rsp_pay_size = (num_regs * 4);
+ cmd_op = cmd->req.arg[0] & 0xff;
+ remainder = (trans->req_pay_size) % (bc_pay_sz);
+ num_frags = (trans->req_pay_size) / (bc_pay_sz);
+ if (remainder)
+ num_frags++;
+ t_num_frags = num_frags;
+ if (qlcnic_sriov_alloc_bc_msg(&trans->req_hdr, num_frags))
+ return -ENOMEM;
+ remainder = (trans->rsp_pay_size) % (bc_pay_sz);
+ num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
+ if (remainder)
+ num_frags++;
+ if (qlcnic_sriov_alloc_bc_msg(&trans->rsp_hdr, num_frags))
+ return -ENOMEM;
+ num_frags = t_num_frags;
+ hdr = trans->req_hdr;
+ } else {
+ cmd->req.arg = (u32 *)trans->req_pay;
+ cmd->rsp.arg = (u32 *)trans->rsp_pay;
+ cmd_op = cmd->req.arg[0] & 0xff;
+ cmd->cmd_op = cmd_op;
+ remainder = (trans->rsp_pay_size) % (bc_pay_sz);
+ num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
+ if (remainder)
+ num_frags++;
+ cmd->req.num = trans->req_pay_size / 4;
+ cmd->rsp.num = trans->rsp_pay_size / 4;
+ hdr = trans->rsp_hdr;
+ cmd->op_type = trans->req_hdr->op_type;
+ }
+
+ trans->trans_id = seq;
+ trans->cmd_id = cmd_op;
+ for (i = 0; i < num_frags; i++) {
+ hdr[i].version = 2;
+ hdr[i].msg_type = msg_type;
+ hdr[i].op_type = cmd->op_type;
+ hdr[i].num_cmds = 1;
+ hdr[i].num_frags = num_frags;
+ hdr[i].frag_num = i + 1;
+ hdr[i].cmd_op = cmd_op;
+ hdr[i].seq_id = seq;
+ }
+ return 0;
+}
+
+static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *trans)
+{
+ if (!trans)
+ return;
+ kfree(trans->req_hdr);
+ kfree(trans->rsp_hdr);
+ kfree(trans);
+}
+
+static int qlcnic_sriov_clear_trans(struct qlcnic_vf_info *vf,
+ struct qlcnic_bc_trans *trans, u8 type)
+{
+ struct qlcnic_trans_list *t_list;
+ unsigned long flags;
+ int ret = 0;
+
+ if (type == QLC_BC_RESPONSE) {
+ t_list = &vf->rcv_act;
+ spin_lock_irqsave(&t_list->lock, flags);
+ t_list->count--;
+ list_del(&trans->list);
+ if (t_list->count > 0)
+ ret = 1;
+ spin_unlock_irqrestore(&t_list->lock, flags);
+ }
+ if (type == QLC_BC_COMMAND) {
+ while (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
+ msleep(100);
+ vf->send_cmd = NULL;
+ clear_bit(QLC_BC_VF_SEND, &vf->state);
+ }
+ return ret;
+}
+
+static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf,
+ work_func_t func)
+{
+ if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
+ vf->adapter->need_fw_reset)
+ return;
+
+ queue_work(sriov->bc.bc_trans_wq, &vf->trans_work);
+}
+
+static inline void qlcnic_sriov_wait_for_resp(struct qlcnic_bc_trans *trans)
+{
+ struct completion *cmpl = &trans->resp_cmpl;
+
+ if (wait_for_completion_timeout(cmpl, QLC_MBOX_RESP_TIMEOUT))
+ trans->trans_state = QLC_END;
+ else
+ trans->trans_state = QLC_ABORT;
+
+ return;
+}
+
+static void qlcnic_sriov_handle_multi_frags(struct qlcnic_bc_trans *trans,
+ u8 type)
+{
+ if (type == QLC_BC_RESPONSE) {
+ trans->curr_rsp_frag++;
+ if (trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
+ trans->trans_state = QLC_INIT;
+ else
+ trans->trans_state = QLC_END;
+ } else {
+ trans->curr_req_frag++;
+ if (trans->curr_req_frag < trans->req_hdr->num_frags)
+ trans->trans_state = QLC_INIT;
+ else
+ trans->trans_state = QLC_WAIT_FOR_RESP;
+ }
+}
+
+static void qlcnic_sriov_wait_for_channel_free(struct qlcnic_bc_trans *trans,
+ u8 type)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct completion *cmpl = &vf->ch_free_cmpl;
+
+ if (!wait_for_completion_timeout(cmpl, QLC_MBOX_CH_FREE_TIMEOUT)) {
+ trans->trans_state = QLC_ABORT;
+ return;
+ }
+
+ clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
+ qlcnic_sriov_handle_multi_frags(trans, type);
+}
+
+static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter *adapter,
+ u32 *hdr, u32 *pay, u32 size)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 fw_mbx;
+ u8 i, max = 2, hdr_size, j;
+
+ hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
+ max = (size / sizeof(u32)) + hdr_size;
+
+ fw_mbx = readl(QLCNIC_MBX_FW(ahw, 0));
+ for (i = 2, j = 0; j < hdr_size; i++, j++)
+ *(hdr++) = readl(QLCNIC_MBX_FW(ahw, i));
+ for (; j < max; i++, j++)
+ *(pay++) = readl(QLCNIC_MBX_FW(ahw, i));
+}
+
+static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info *vf)
+{
+ int ret = -EBUSY;
+ u32 timeout = 10000;
+
+ do {
+ if (!test_and_set_bit(QLC_BC_VF_CHANNEL, &vf->state)) {
+ ret = 0;
+ break;
+ }
+ mdelay(1);
+ } while (--timeout);
+
+ return ret;
+}
+
+static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ u32 pay_size, hdr_size;
+ u32 *hdr, *pay;
+ int ret;
+ u8 pci_func = trans->func_id;
+
+ if (__qlcnic_sriov_issue_bc_post(vf))
+ return -EBUSY;
+
+ if (type == QLC_BC_COMMAND) {
+ hdr = (u32 *)(trans->req_hdr + trans->curr_req_frag);
+ pay = (u32 *)(trans->req_pay + trans->curr_req_frag);
+ hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
+ pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
+ trans->curr_req_frag);
+ pay_size = (pay_size / sizeof(u32));
+ } else {
+ hdr = (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag);
+ pay = (u32 *)(trans->rsp_pay + trans->curr_rsp_frag);
+ hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
+ pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
+ trans->curr_rsp_frag);
+ pay_size = (pay_size / sizeof(u32));
+ }
+
+ ret = qlcnic_sriov_post_bc_msg(vf->adapter, hdr, pay,
+ pci_func, pay_size);
+ return ret;
+}
+
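+/* Drive a back-channel transaction through its state machine: post each
+ * fragment and wait for the channel-free event between fragments; a
+ * command then waits for the response. An FLR or a pending firmware
+ * reset aborts the transaction.
+ */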
+static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans *trans,
+ struct qlcnic_vf_info *vf, u8 type)
+{
+ bool flag = true;
+ int err = -EIO;
+
+ while (flag) {
+ if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
+ vf->adapter->need_fw_reset)
+ trans->trans_state = QLC_ABORT;
+
+ switch (trans->trans_state) {
+ case QLC_INIT:
+ trans->trans_state = QLC_WAIT_FOR_CHANNEL_FREE;
+ if (qlcnic_sriov_issue_bc_post(trans, type))
+ trans->trans_state = QLC_ABORT;
+ break;
+ case QLC_WAIT_FOR_CHANNEL_FREE:
+ qlcnic_sriov_wait_for_channel_free(trans, type);
+ break;
+ case QLC_WAIT_FOR_RESP:
+ qlcnic_sriov_wait_for_resp(trans);
+ break;
+ case QLC_END:
+ err = 0;
+ flag = false;
+ break;
+ case QLC_ABORT:
+ err = -EIO;
+ flag = false;
+ clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
+ break;
+ default:
+ err = -EIO;
+ flag = false;
+ }
+ }
+ return err;
+}
+
+static int qlcnic_sriov_send_bc_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans, int pci_func)
+{
+ struct qlcnic_vf_info *vf;
+ int err, index = qlcnic_sriov_func_to_index(adapter, pci_func);
+
+ if (index < 0)
+ return -EIO;
+
+ vf = &adapter->ahw->sriov->vf_info[index];
+ trans->vf = vf;
+ trans->func_id = pci_func;
+
+ if (!test_bit(QLC_BC_VF_STATE, &vf->state)) {
+ if (qlcnic_sriov_pf_check(adapter))
+ return -EIO;
+ if (qlcnic_sriov_vf_check(adapter) &&
+ trans->cmd_id != QLCNIC_BC_CMD_CHANNEL_INIT)
+ return -EIO;
+ }
+
+ mutex_lock(&vf->send_cmd_lock);
+ vf->send_cmd = trans;
+ err = __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_COMMAND);
+ qlcnic_sriov_clear_trans(vf, trans, QLC_BC_COMMAND);
+ mutex_unlock(&vf->send_cmd_lock);
+ return err;
+}
+
+static void __qlcnic_sriov_process_bc_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+#ifdef CONFIG_QLCNIC_SRIOV
+ if (qlcnic_sriov_pf_check(adapter)) {
+ qlcnic_sriov_pf_process_bc_cmd(adapter, trans, cmd);
+ return;
+ }
+#endif
+ cmd->rsp.arg[0] |= (0x9 << 25);
+ return;
+}
+
+static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
+{
+ struct qlcnic_vf_info *vf = container_of(work, struct qlcnic_vf_info,
+ trans_work);
+ struct qlcnic_bc_trans *trans = NULL;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_cmd_args cmd;
+ u8 req;
+
+ if (adapter->need_fw_reset)
+ return;
+
+ if (test_bit(QLC_BC_VF_FLR, &vf->state))
+ return;
+
+ memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
+ trans = list_first_entry(&vf->rcv_act.wait_list,
+ struct qlcnic_bc_trans, list);
+ adapter = vf->adapter;
+
+ if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, trans->req_hdr->seq_id,
+ QLC_BC_RESPONSE))
+ goto cleanup_trans;
+
+ __qlcnic_sriov_process_bc_cmd(adapter, trans, &cmd);
+ trans->trans_state = QLC_INIT;
+ __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_RESPONSE);
+
+cleanup_trans:
+ qlcnic_free_mbx_args(&cmd);
+ req = qlcnic_sriov_clear_trans(vf, trans, QLC_BC_RESPONSE);
+ qlcnic_sriov_cleanup_transaction(trans);
+ if (req)
+ qlcnic_sriov_schedule_bc_cmd(adapter->ahw->sriov, vf,
+ qlcnic_sriov_process_bc_cmd);
+}
+
+static void qlcnic_sriov_handle_bc_resp(struct qlcnic_bc_hdr *hdr,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_bc_trans *trans;
+ u32 pay_size;
+
+ if (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
+ return;
+
+ trans = vf->send_cmd;
+
+ if (trans == NULL)
+ goto clear_send;
+
+ if (trans->trans_id != hdr->seq_id)
+ goto clear_send;
+
+ pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
+ trans->curr_rsp_frag);
+ qlcnic_sriov_pull_bc_msg(vf->adapter,
+ (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag),
+ (u32 *)(trans->rsp_pay + trans->curr_rsp_frag),
+ pay_size);
+ if (++trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
+ goto clear_send;
+
+ complete(&trans->resp_cmpl);
+
+clear_send:
+ clear_bit(QLC_BC_VF_SEND, &vf->state);
+}
+
+int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_bc_trans *trans)
+{
+ struct qlcnic_trans_list *t_list = &vf->rcv_act;
+
+ t_list->count++;
+ list_add_tail(&trans->list, &t_list->wait_list);
+ if (t_list->count == 1)
+ qlcnic_sriov_schedule_bc_cmd(sriov, vf,
+ qlcnic_sriov_process_bc_cmd);
+ return 0;
+}
+
+static int qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_bc_trans *trans)
+{
+ struct qlcnic_trans_list *t_list = &vf->rcv_act;
+
+ spin_lock(&t_list->lock);
+
+ __qlcnic_sriov_add_act_list(sriov, vf, trans);
+
+ spin_unlock(&t_list->lock);
+ return 0;
+}
+
+static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_bc_hdr *hdr)
+{
+ struct qlcnic_bc_trans *trans = NULL;
+ struct list_head *node;
+ u32 pay_size, curr_frag;
+ u8 found = 0, active = 0;
+
+ spin_lock(&vf->rcv_pend.lock);
+ if (vf->rcv_pend.count > 0) {
+ list_for_each(node, &vf->rcv_pend.wait_list) {
+ trans = list_entry(node, struct qlcnic_bc_trans, list);
+ if (trans->trans_id == hdr->seq_id) {
+ found = 1;
+ break;
+ }
+ }
+ }
+
+ if (found) {
+ curr_frag = trans->curr_req_frag;
+ pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
+ curr_frag);
+ qlcnic_sriov_pull_bc_msg(vf->adapter,
+ (u32 *)(trans->req_hdr + curr_frag),
+ (u32 *)(trans->req_pay + curr_frag),
+ pay_size);
+ trans->curr_req_frag++;
+ if (trans->curr_req_frag >= hdr->num_frags) {
+ vf->rcv_pend.count--;
+ list_del(&trans->list);
+ active = 1;
+ }
+ }
+ spin_unlock(&vf->rcv_pend.lock);
+
+ if (active)
+ if (qlcnic_sriov_add_act_list(sriov, vf, trans))
+ qlcnic_sriov_cleanup_transaction(trans);
+
+ return;
+}
+
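+/* Handle an incoming back-channel command: continuation fragments are
+ * matched against the pending list, while a first fragment starts a new
+ * transaction whose payload is pulled from the mailbox registers; once
+ * all fragments have arrived the transaction moves to the active list.
+ */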
+static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,
+ struct qlcnic_bc_hdr *hdr,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_bc_trans *trans;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_cmd_args cmd;
+ u32 pay_size;
+ int err;
+ u8 cmd_op;
+
+ if (adapter->need_fw_reset)
+ return;
+
+ if (!test_bit(QLC_BC_VF_STATE, &vf->state) &&
+ hdr->op_type != QLC_BC_CMD &&
+ hdr->cmd_op != QLCNIC_BC_CMD_CHANNEL_INIT)
+ return;
+
+ if (hdr->frag_num > 1) {
+ qlcnic_sriov_handle_pending_trans(sriov, vf, hdr);
+ return;
+ }
+
+ memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
+ cmd_op = hdr->cmd_op;
+ if (qlcnic_sriov_alloc_bc_trans(&trans))
+ return;
+
+ if (hdr->op_type == QLC_BC_CMD)
+ err = qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op);
+ else
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, cmd_op);
+
+ if (err) {
+ qlcnic_sriov_cleanup_transaction(trans);
+ return;
+ }
+
+ cmd.op_type = hdr->op_type;
+ if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, hdr->seq_id,
+ QLC_BC_COMMAND)) {
+ qlcnic_free_mbx_args(&cmd);
+ qlcnic_sriov_cleanup_transaction(trans);
+ return;
+ }
+
+ pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
+ trans->curr_req_frag);
+ qlcnic_sriov_pull_bc_msg(vf->adapter,
+ (u32 *)(trans->req_hdr + trans->curr_req_frag),
+ (u32 *)(trans->req_pay + trans->curr_req_frag),
+ pay_size);
+ trans->func_id = vf->pci_func;
+ trans->vf = vf;
+ trans->trans_id = hdr->seq_id;
+ trans->curr_req_frag++;
+
+ if (qlcnic_sriov_soft_flr_check(adapter, trans, vf))
+ return;
+
+ if (trans->curr_req_frag == trans->req_hdr->num_frags) {
+ if (qlcnic_sriov_add_act_list(sriov, vf, trans)) {
+ qlcnic_free_mbx_args(&cmd);
+ qlcnic_sriov_cleanup_transaction(trans);
+ }
+ } else {
+ spin_lock(&vf->rcv_pend.lock);
+ list_add_tail(&trans->list, &vf->rcv_pend.wait_list);
+ vf->rcv_pend.count++;
+ spin_unlock(&vf->rcv_pend.lock);
+ }
+}
+
+static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_bc_hdr hdr;
+ u32 *ptr = (u32 *)&hdr;
+ u8 msg_type, i;
+
+ for (i = 2; i < 6; i++)
+ ptr[i - 2] = readl(QLCNIC_MBX_FW(vf->adapter->ahw, i));
+ msg_type = hdr.msg_type;
+
+ switch (msg_type) {
+ case QLC_BC_COMMAND:
+ qlcnic_sriov_handle_bc_cmd(sriov, &hdr, vf);
+ break;
+ case QLC_BC_RESPONSE:
+ qlcnic_sriov_handle_bc_resp(&hdr, vf);
+ break;
+ }
+}
+
+static void qlcnic_sriov_handle_flr_event(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_adapter *adapter = vf->adapter;
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_sriov_pf_handle_flr(sriov, vf);
+ else
+ dev_err(&adapter->pdev->dev,
+ "Invalid event to VF. VF should not get FLR event\n");
+}
+
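+/* Decode a back-channel event: bit QLC_BC_CFREE completes the channel-free
+ * wait, bit QLC_BC_FLR is handled by the PF as a VF FLR, and bit QLC_BC_MSG
+ * indicates a message fragment to be pulled from the mailbox.
+ */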
+void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *adapter, u32 event)
+{
+ struct qlcnic_vf_info *vf;
+ struct qlcnic_sriov *sriov;
+ int index;
+ u8 pci_func;
+
+ sriov = adapter->ahw->sriov;
+ pci_func = qlcnic_sriov_target_func_id(event);
+ index = qlcnic_sriov_func_to_index(adapter, pci_func);
+
+ if (index < 0)
+ return;
+
+ vf = &sriov->vf_info[index];
+ vf->pci_func = pci_func;
+
+ if (qlcnic_sriov_channel_free_check(event))
+ complete(&vf->ch_free_cmpl);
+
+ if (qlcnic_sriov_flr_check(event)) {
+ qlcnic_sriov_handle_flr_event(sriov, vf);
+ return;
+ }
+
+ if (qlcnic_sriov_bc_msg_check(event))
+ qlcnic_sriov_handle_msg_event(sriov, vf);
+}
+
+int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
+ return 0;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_BC_EVENT_SETUP))
+ return -ENOMEM;
+
+ if (enable)
+ cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
+
+ err = qlcnic_83xx_issue_cmd(adapter, &cmd);
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to %s bc events, err=%d\n",
+ (enable ? "enable" : "disable"), err);
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans)
+{
+ u8 max = QLC_BC_CMD_MAX_RETRY_CNT;
+ u32 state;
+
+ state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+ if (state == QLC_83XX_IDC_DEV_READY) {
+ msleep(20);
+ clear_bit(QLC_BC_VF_CHANNEL, &trans->vf->state);
+ trans->trans_state = QLC_INIT;
+ if (++adapter->fw_fail_cnt > max)
+ return -EIO;
+ else
+ return 0;
+ }
+
+ return -EIO;
+}
+
+static int __qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
+ struct device *dev = &adapter->pdev->dev;
+ struct qlcnic_bc_trans *trans;
+ int err;
+ u32 rsp_data, opcode, mbx_err_code, rsp;
+ u16 seq = ++adapter->ahw->sriov->bc.trans_counter;
+ u8 func = ahw->pci_func;
+
+ rsp = qlcnic_sriov_alloc_bc_trans(&trans);
+ if (rsp)
+ goto free_cmd;
+
+ rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND);
+ if (rsp)
+ goto cleanup_transaction;
+
+retry:
+ if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
+ rsp = -EIO;
+ QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
+ QLCNIC_MBX_RSP(cmd->req.arg[0]), func);
+ goto err_out;
+ }
+
+ err = qlcnic_sriov_send_bc_cmd(adapter, trans, func);
+ if (err) {
+ dev_err(dev, "MBX command 0x%x timed out for VF %d\n",
+ (cmd->req.arg[0] & 0xffff), func);
+ rsp = QLCNIC_RCODE_TIMEOUT;
+
+ /* After an adapter reset the PF driver may take some time to
+ * respond to the VF's request, so retry the request up to the
+ * maximum retry count.
+ */
+ if ((trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) &&
+ !qlcnic_sriov_retry_bc_cmd(adapter, trans))
+ goto retry;
+
+ goto err_out;
+ }
+
+ rsp_data = cmd->rsp.arg[0];
+ mbx_err_code = QLCNIC_MBX_STATUS(rsp_data);
+ opcode = QLCNIC_MBX_RSP(cmd->req.arg[0]);
+
+ if ((mbx_err_code == QLCNIC_MBX_RSP_OK) ||
+ (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
+ rsp = QLCNIC_RCODE_SUCCESS;
+ } else {
+ if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+ rsp = QLCNIC_RCODE_SUCCESS;
+ } else {
+ rsp = mbx_err_code;
+ if (!rsp)
+ rsp = 1;
+
+ dev_err(dev,
+ "MBX command 0x%x failed with err:0x%x for VF %d\n",
+ opcode, mbx_err_code, func);
+ }
+ }
+
+err_out:
+ if (rsp == QLCNIC_RCODE_TIMEOUT) {
+ ahw->reset_context = 1;
+ adapter->need_fw_reset = 1;
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ }
+
+cleanup_transaction:
+ qlcnic_sriov_cleanup_transaction(trans);
+
+free_cmd:
+ if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+ qlcnic_free_mbx_args(cmd);
+ kfree(cmd);
+ }
+
+ return rsp;
+}
+
+static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT)
+ return qlcnic_sriov_async_issue_cmd(adapter, cmd);
+ else
+ return __qlcnic_sriov_issue_cmd(adapter, cmd);
+}
+
+static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
+{
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op))
+ return -ENOMEM;
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "Failed bc channel %s %d\n", cmd_op ? "term" : "init",
+ ret);
+ goto out;
+ }
+
+ cmd_op = (cmd.rsp.arg[0] & 0xff);
+ if (cmd.rsp.arg[0] >> 25 == 2) {
+ ret = 2;
+ goto out;
+ }
+ if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
+ set_bit(QLC_BC_VF_STATE, &vf->state);
+ else
+ clear_bit(QLC_BC_VF_STATE, &vf->state);
+
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return ret;
+}
+
+static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf;
+ u16 vlan_id;
+ int i;
+
+ vf = &adapter->ahw->sriov->vf_info[0];
+
+ if (!qlcnic_sriov_check_any_vlan(vf)) {
+ qlcnic_nic_add_mac(adapter, mac, 0);
+ } else {
+ spin_lock(&vf->vlan_list_lock);
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ vlan_id = vf->sriov_vlans[i];
+ if (vlan_id)
+ qlcnic_nic_add_mac(adapter, mac, vlan_id);
+ }
+ spin_unlock(&vf->vlan_list_lock);
+ if (qlcnic_84xx_check(adapter))
+ qlcnic_nic_add_mac(adapter, mac, 0);
+ }
+}
+
+void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
+{
+ struct list_head *head = &bc->async_list;
+ struct qlcnic_async_work_list *entry;
+
+ flush_workqueue(bc->bc_async_wq);
+ while (!list_empty(head)) {
+ entry = list_entry(head->next, struct qlcnic_async_work_list,
+ list);
+ cancel_work_sync(&entry->work);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+}
+
+void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ static const u8 bcast_addr[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+ struct netdev_hw_addr *ha;
+ u32 mode = VPORT_MISS_MODE_DROP;
+
+ if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+ return;
+
+ if (netdev->flags & IFF_PROMISC) {
+ if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ } else if ((netdev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(netdev) > ahw->max_mc_count)) {
+ mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+ } else {
+ qlcnic_vf_add_mc_list(netdev, bcast_addr);
+ if (!netdev_mc_empty(netdev)) {
+ netdev_for_each_mc_addr(ha, netdev)
+ qlcnic_vf_add_mc_list(netdev, ha->addr);
+ }
+ }
+
+ /* Configure unicast MAC addresses; if there is not enough space to
+ * store all of them, fall back to promiscuous mode.
+ */
+ if (netdev_uc_count(netdev) > ahw->max_uc_count) {
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ } else if (!netdev_uc_empty(netdev)) {
+ netdev_for_each_uc_addr(ha, netdev)
+ qlcnic_vf_add_mc_list(netdev, ha->addr);
+ }
+
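+ /* On a VF that falls back to promiscuous mode, enable driver-level
+ * MAC learning unless FDB based learning is already in use.
+ */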
+ if (adapter->pdev->is_virtfn) {
+ if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
+ !adapter->fdb_mac_learn) {
+ qlcnic_alloc_lb_filters_mem(adapter);
+ adapter->drv_mac_learn = 1;
+ adapter->rx_mac_learn = true;
+ } else {
+ adapter->drv_mac_learn = 0;
+ adapter->rx_mac_learn = false;
+ }
+ }
+
+ qlcnic_nic_set_promisc(adapter, mode);
+}
+
+static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
+{
+ struct qlcnic_async_work_list *entry;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_cmd_args *cmd;
+
+ entry = container_of(work, struct qlcnic_async_work_list, work);
+ adapter = entry->ptr;
+ cmd = entry->cmd;
+ __qlcnic_sriov_issue_cmd(adapter, cmd);
+}
+
+static struct qlcnic_async_work_list *
+qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
+{
+ struct list_head *node;
+ struct qlcnic_async_work_list *entry = NULL;
+ u8 empty = 0;
+
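+ /* Reuse an entry whose work is no longer pending; otherwise allocate
+ * a new one and add it to the async list.
+ */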
+ list_for_each(node, &bc->async_list) {
+ entry = list_entry(node, struct qlcnic_async_work_list, list);
+ if (!work_pending(&entry->work)) {
+ empty = 1;
+ break;
+ }
+ }
+
+ if (!empty) {
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ return NULL;
+ list_add_tail(&entry->list, &bc->async_list);
+ }
+
+ return entry;
+}
+
+static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc,
+ work_func_t func, void *data,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_async_work_list *entry = NULL;
+
+ entry = qlcnic_sriov_get_free_node_async_work(bc);
+ if (!entry)
+ return;
+
+ entry->ptr = data;
+ entry->cmd = cmd;
+ INIT_WORK(&entry->work, func);
+ queue_work(bc->bc_async_wq, &entry->work);
+}
+
+static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
+
+ if (adapter->need_fw_reset)
+ return -EIO;
+
+ qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd,
+ adapter, cmd);
+ return 0;
+}
+
+static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ adapter->need_fw_reset = 0;
+ qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
+
+ err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
+ if (err)
+ return err;
+
+ err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
+ if (err)
+ goto err_out_cleanup_bc_intr;
+
+ err = qlcnic_sriov_vf_init_driver(adapter);
+ if (err)
+ goto err_out_term_channel;
+
+ return 0;
+
+err_out_term_channel:
+ qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
+
+err_out_cleanup_bc_intr:
+ qlcnic_sriov_cfg_bc_intr(adapter, 0);
+ return err;
+}
+
+static void qlcnic_sriov_vf_attach(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (netif_running(netdev)) {
+ if (!qlcnic_up(adapter, netdev))
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
+
+ netif_device_attach(netdev);
+}
+
+static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_intrpt_config *intr_tbl = ahw->intr_tbl;
+ struct net_device *netdev = adapter->netdev;
+ u8 i, max_ints = ahw->num_msix - 1;
+
+ netif_device_detach(netdev);
+ qlcnic_83xx_detach_mailbox_work(adapter);
+ qlcnic_83xx_disable_mbx_intr(adapter);
+
+ if (netif_running(netdev))
+ qlcnic_down(adapter, netdev);
+
+ for (i = 0; i < max_ints; i++) {
+ intr_tbl[i].id = i;
+ intr_tbl[i].enabled = 0;
+ intr_tbl[i].src = 0;
+ }
+ ahw->reset_context = 0;
+}
+
+static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct device *dev = &adapter->pdev->dev;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+ u8 func = ahw->pci_func;
+ u32 state;
+
+ if ((idc->prev_state == QLC_83XX_IDC_DEV_NEED_RESET) ||
+ (idc->prev_state == QLC_83XX_IDC_DEV_INIT)) {
+ if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
+ qlcnic_sriov_vf_attach(adapter);
+ adapter->fw_fail_cnt = 0;
+ dev_info(dev,
+ "%s: Reinitialization of VF 0x%x done after FW reset\n",
+ __func__, func);
+ } else {
+ dev_err(dev,
+ "%s: Reinitialization of VF 0x%x failed after FW reset\n",
+ __func__, func);
+ state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
+ dev_info(dev, "Current state 0x%x after FW reset\n",
+ state);
+ }
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
+ struct device *dev = &adapter->pdev->dev;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+ u8 func = ahw->pci_func;
+ u32 state;
+
+ adapter->reset_ctx_cnt++;
+
+ /* Skip the context reset and check if FW is hung */
+ if (adapter->reset_ctx_cnt < 3) {
+ adapter->need_fw_reset = 1;
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ dev_info(dev,
+ "Resetting context, wait here to check if FW is in failed state\n");
+ return 0;
+ }
+
+ /* Check if the number of resets exceeds the threshold.
+ * If so, just fail the VF.
+ */
+ if (adapter->reset_ctx_cnt > QLC_83XX_VF_RESET_FAIL_THRESH) {
+ clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
+ adapter->tx_timeo_cnt = 0;
+ adapter->fw_fail_cnt = 0;
+ adapter->reset_ctx_cnt = 0;
+ qlcnic_sriov_vf_detach(adapter);
+ dev_err(dev,
+ "Device context resets have exceeded the threshold, device interface will be shutdown\n");
+ return -EIO;
+ }
+
+ dev_info(dev, "Resetting context of VF 0x%x\n", func);
+ dev_info(dev, "%s: Context reset count %d for VF 0x%x\n",
+ __func__, adapter->reset_ctx_cnt, func);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ adapter->need_fw_reset = 1;
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ qlcnic_sriov_vf_detach(adapter);
+ adapter->need_fw_reset = 0;
+
+ if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
+ qlcnic_sriov_vf_attach(adapter);
+ adapter->tx_timeo_cnt = 0;
+ adapter->reset_ctx_cnt = 0;
+ adapter->fw_fail_cnt = 0;
+ dev_info(dev, "Done resetting context for VF 0x%x\n", func);
+ } else {
+ dev_err(dev, "%s: Reinitialization of VF 0x%x failed\n",
+ __func__, func);
+ state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
+ dev_info(dev, "%s: Current state 0x%x\n", __func__, state);
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_vf_idc_ready_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int ret = 0;
+
+ if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY)
+ ret = qlcnic_sriov_vf_handle_dev_ready(adapter);
+ else if (ahw->reset_context)
+ ret = qlcnic_sriov_vf_handle_context_reset(adapter);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return ret;
+}
+
+static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter)
+{
+ struct qlc_83xx_idc *idc = &adapter->ahw->idc;
+
+ dev_err(&adapter->pdev->dev, "Device is in failed state\n");
+ if (idc->prev_state == QLC_83XX_IDC_DEV_READY)
+ qlcnic_sriov_vf_detach(adapter);
+
+ clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return -EIO;
+}
+
+static int
+qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+ struct qlc_83xx_idc *idc = &adapter->ahw->idc;
+
+ dev_info(&adapter->pdev->dev, "Device is in quiescent state\n");
+ if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ adapter->tx_timeo_cnt = 0;
+ adapter->reset_ctx_cnt = 0;
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ qlcnic_sriov_vf_detach(adapter);
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+ struct qlc_83xx_idc *idc = &adapter->ahw->idc;
+ u8 func = adapter->ahw->pci_func;
+
+ if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
+ dev_err(&adapter->pdev->dev,
+ "Firmware hang detected by VF 0x%x\n", func);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ adapter->tx_timeo_cnt = 0;
+ adapter->reset_ctx_cnt = 0;
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ qlcnic_sriov_vf_detach(adapter);
+ }
+ return 0;
+}
+
+static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter)
+{
+ dev_err(&adapter->pdev->dev, "%s: Device in unknown state\n", __func__);
+ return 0;
+}
+
+static void qlcnic_sriov_vf_periodic_tasks(struct qlcnic_adapter *adapter)
+{
+ if (adapter->fhash.fnum)
+ qlcnic_prune_lb_filters(adapter);
+}
+
+static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter;
+ struct qlc_83xx_idc *idc;
+ int ret = 0;
+
+ adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
+ idc = &adapter->ahw->idc;
+ idc->curr_state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+
+ switch (idc->curr_state) {
+ case QLC_83XX_IDC_DEV_READY:
+ ret = qlcnic_sriov_vf_idc_ready_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_NEED_RESET:
+ case QLC_83XX_IDC_DEV_INIT:
+ ret = qlcnic_sriov_vf_idc_init_reset_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_NEED_QUISCENT:
+ ret = qlcnic_sriov_vf_idc_need_quiescent_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_FAILED:
+ ret = qlcnic_sriov_vf_idc_failed_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_QUISCENT:
+ break;
+ default:
+ ret = qlcnic_sriov_vf_idc_unknown_state(adapter);
+ }
+
+ idc->prev_state = idc->curr_state;
+ qlcnic_sriov_vf_periodic_tasks(adapter);
+
+ if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status))
+ qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
+ idc->delay);
+}
+
+static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *adapter)
+{
+ while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ msleep(20);
+
+ clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ cancel_delayed_work_sync(&adapter->fw_work);
+}
+
+static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf, u16 vlan_id)
+{
+ int i, err = -EINVAL;
+
+ if (!vf->sriov_vlans)
+ return err;
+
+ spin_lock_bh(&vf->vlan_list_lock);
+
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ if (vf->sriov_vlans[i] == vlan_id) {
+ err = 0;
+ break;
+ }
+ }
+
+ spin_unlock_bh(&vf->vlan_list_lock);
+ return err;
+}
+
+static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf)
+{
+ int err = 0;
+
+ spin_lock_bh(&vf->vlan_list_lock);
+
+ if (vf->num_vlan >= sriov->num_allowed_vlans)
+ err = -EINVAL;
+
+ spin_unlock_bh(&vf->vlan_list_lock);
+ return err;
+}
+
+static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_adapter *adapter,
+ u16 vid, u8 enable)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf;
+ bool vlan_exist;
+ u8 allowed = 0;
+ int i;
+
+ vf = &adapter->ahw->sriov->vf_info[0];
+ vlan_exist = qlcnic_sriov_check_any_vlan(vf);
+ if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE)
+ return -EINVAL;
+
+ if (enable) {
+ if (qlcnic_83xx_vf_check(adapter) && vlan_exist)
+ return -EINVAL;
+
+ if (qlcnic_sriov_validate_num_vlans(sriov, vf))
+ return -EINVAL;
+
+ if (sriov->any_vlan) {
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ if (sriov->allowed_vlans[i] == vid)
+ allowed = 1;
+ }
+
+ if (!allowed)
+ return -EINVAL;
+ }
+ } else {
+ if (!vlan_exist || qlcnic_sriov_check_vlan_id(sriov, vf, vid))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
+ enum qlcnic_vlan_operations opcode)
+{
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_sriov *sriov;
+
+ sriov = adapter->ahw->sriov;
+
+ if (!vf->sriov_vlans)
+ return;
+
+ spin_lock_bh(&vf->vlan_list_lock);
+
+ switch (opcode) {
+ case QLC_VLAN_ADD:
+ qlcnic_sriov_add_vlan_id(sriov, vf, vlan_id);
+ break;
+ case QLC_VLAN_DELETE:
+ qlcnic_sriov_del_vlan_id(sriov, vf, vlan_id);
+ break;
+ default:
+ netdev_err(adapter->netdev, "Invalid VLAN operation\n");
+ }
+
+ spin_unlock_bh(&vf->vlan_list_lock);
+}
+
+int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
+ u16 vid, u8 enable)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_vf_info *vf;
+ struct qlcnic_cmd_args cmd;
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ if (vid == 0)
+ return 0;
+
+ vf = &adapter->ahw->sriov->vf_info[0];
+ ret = qlcnic_sriov_validate_vlan_cfg(adapter, vid, enable);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd,
+ QLCNIC_BC_CMD_CFG_GUEST_VLAN);
+ if (ret)
+ return ret;
+
+ cmd.req.arg[1] = (enable & 1) | vid << 16;
+
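+ /* Flush any queued async mailbox work before changing the guest
+ * VLAN configuration.
+ */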
+ qlcnic_sriov_cleanup_async_list(&sriov->bc);
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to configure guest VLAN, err=%d\n", ret);
+ } else {
+ netif_addr_lock_bh(netdev);
+ qlcnic_free_mac_list(adapter);
+ netif_addr_unlock_bh(netdev);
+
+ if (enable)
+ qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD);
+ else
+ qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE);
+
+ netif_addr_lock_bh(netdev);
+ qlcnic_set_multi(netdev);
+ netif_addr_unlock_bh(netdev);
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+ return ret;
+}
+
+static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter)
+{
+ struct list_head *head = &adapter->mac_list;
+ struct qlcnic_mac_vlan_list *cur;
+
+ while (!list_empty(head)) {
+ cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
+ qlcnic_sre_macaddr_change(adapter, cur->mac_addr, cur->vlan_id,
+ QLCNIC_MAC_DEL);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+}
+
+static int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+ int retval;
+
+ netif_device_detach(netdev);
+ qlcnic_cancel_idc_work(adapter);
+
+ if (netif_running(netdev))
+ qlcnic_down(adapter, netdev);
+
+ qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
+ qlcnic_sriov_cfg_bc_intr(adapter, 0);
+ qlcnic_83xx_disable_mbx_intr(adapter);
+ cancel_delayed_work_sync(&adapter->idc_aen_work);
+
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
+ return 0;
+}
+
+static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
+{
+ struct qlc_83xx_idc *idc = &adapter->ahw->idc;
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ set_bit(QLC_83XX_MODULE_LOADED, &idc->status);
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
+ err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
+ if (err)
+ return err;
+
+ err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
+ if (!err) {
+ if (netif_running(netdev)) {
+ err = qlcnic_up(adapter, netdev);
+ if (!err)
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
+ }
+
+ netif_device_attach(netdev);
+ qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
+ idc->delay);
+ return err;
+}
+
+void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf;
+ int i;
+
+ for (i = 0; i < sriov->num_vfs; i++) {
+ vf = &sriov->vf_info[i];
+ vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
+ sizeof(*vf->sriov_vlans), GFP_KERNEL);
+ }
+}
+
+void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf;
+ int i;
+
+ for (i = 0; i < sriov->num_vfs; i++) {
+ vf = &sriov->vf_info[i];
+ kfree(vf->sriov_vlans);
+ vf->sriov_vlans = NULL;
+ }
+}
+
+void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf, u16 vlan_id)
+{
+ int i;
+
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ if (!vf->sriov_vlans[i]) {
+ vf->sriov_vlans[i] = vlan_id;
+ vf->num_vlan++;
+ return;
+ }
+ }
+}
+
+void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf, u16 vlan_id)
+{
+ int i;
+
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ if (vf->sriov_vlans[i] == vlan_id) {
+ vf->sriov_vlans[i] = 0;
+ vf->num_vlan--;
+ return;
+ }
+ }
+}
+
+bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf)
+{
+ bool err = false;
+
+ spin_lock_bh(&vf->vlan_list_lock);
+
+ if (vf->num_vlan)
+ err = true;
+
+ spin_unlock_bh(&vf->vlan_list_lock);
+ return err;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
new file mode 100644
index 00000000000..a29538b86ed
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -0,0 +1,2047 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic_sriov.h"
+#include "qlcnic.h"
+#include <linux/types.h>
+
+#define QLCNIC_SRIOV_VF_MAX_MAC 7
+#define QLC_VF_MIN_TX_RATE 100
+#define QLC_VF_MAX_TX_RATE 9999
+#define QLC_MAC_OPCODE_MASK 0x7
+#define QLC_VF_FLOOD_BIT BIT_16
+#define QLC_FLOOD_MODE 0x5
+#define QLC_SRIOV_ALLOW_VLAN0 BIT_19
+#define QLC_INTR_COAL_TYPE_MASK 0x7
+
+static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8);
+
+struct qlcnic_sriov_cmd_handler {
+ int (*fn) (struct qlcnic_bc_trans *, struct qlcnic_cmd_args *);
+};
+
+struct qlcnic_sriov_fw_cmd_handler {
+ u32 cmd;
+ int (*fn) (struct qlcnic_bc_trans *, struct qlcnic_cmd_args *);
+};
+
+static int qlcnic_sriov_pf_set_vport_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info,
+ u16 vport_id)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO))
+ return -ENOMEM;
+
+ cmd.req.arg[1] = (vport_id << 16) | 0x1;
+ cmd.req.arg[2] = npar_info->bit_offsets;
+ cmd.req.arg[2] |= npar_info->min_tx_bw << 16;
+ cmd.req.arg[3] = npar_info->max_tx_bw | (npar_info->max_tx_ques << 16);
+ cmd.req.arg[4] = npar_info->max_tx_mac_filters;
+ cmd.req.arg[4] |= npar_info->max_rx_mcast_mac_filters << 16;
+ cmd.req.arg[5] = npar_info->max_rx_ucast_mac_filters |
+ (npar_info->max_rx_ip_addr << 16);
+ cmd.req.arg[6] = npar_info->max_rx_lro_flow |
+ (npar_info->max_rx_status_rings << 16);
+ cmd.req.arg[7] = npar_info->max_rx_buf_rings |
+ (npar_info->max_rx_ques << 16);
+ cmd.req.arg[8] = npar_info->max_tx_vlan_keys;
+ cmd.req.arg[8] |= npar_info->max_local_ipv6_addrs << 16;
+ cmd.req.arg[9] = npar_info->max_remote_ipv6_addrs;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to set vport info, err=%d\n", err);
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *info, u16 func)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_resources *res = &sriov->ff_max;
+ u16 num_macs = sriov->num_allowed_vlans + 1;
+ int ret = -EIO, vpid, id;
+ struct qlcnic_vport *vp;
+ u32 num_vfs, max, temp;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
+ if (vpid < 0)
+ return -EINVAL;
+
+ num_vfs = sriov->num_vfs;
+ max = num_vfs + 1;
+ info->bit_offsets = 0xffff;
+ info->max_tx_ques = res->num_tx_queues / max;
+
+ if (qlcnic_83xx_pf_check(adapter))
+ num_macs = QLCNIC_83XX_SRIOV_VF_MAX_MAC;
+
+ info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
+
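+ /* The PF keeps whatever remains after the per-VF allotments are
+ * carved out; each VF gets a fixed per-function share.
+ */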
+ if (adapter->ahw->pci_func == func) {
+ info->min_tx_bw = 0;
+ info->max_tx_bw = MAX_BW;
+
+ temp = res->num_rx_ucast_mac_filters - num_macs * num_vfs;
+ info->max_rx_ucast_mac_filters = temp;
+ temp = res->num_tx_mac_filters - num_macs * num_vfs;
+ info->max_tx_mac_filters = temp;
+ temp = num_macs * num_vfs * QLCNIC_SRIOV_VF_MAX_MAC;
+ temp = res->num_rx_mcast_mac_filters - temp;
+ info->max_rx_mcast_mac_filters = temp;
+
+ info->max_tx_ques = res->num_tx_queues - sriov->num_vfs;
+ } else {
+ id = qlcnic_sriov_func_to_index(adapter, func);
+ if (id < 0)
+ return id;
+ vp = sriov->vf_info[id].vp;
+ info->min_tx_bw = vp->min_tx_bw;
+ info->max_tx_bw = vp->max_tx_bw;
+
+ info->max_rx_ucast_mac_filters = num_macs;
+ info->max_tx_mac_filters = num_macs;
+ temp = num_macs * QLCNIC_SRIOV_VF_MAX_MAC;
+ info->max_rx_mcast_mac_filters = temp;
+
+ info->max_tx_ques = QLCNIC_SINGLE_RING;
+ }
+
+ info->max_rx_ip_addr = res->num_destip / max;
+ info->max_rx_status_rings = res->num_rx_status_rings / max;
+ info->max_rx_buf_rings = res->num_rx_buf_rings / max;
+ info->max_rx_ques = res->num_rx_queues / max;
+ info->max_rx_lro_flow = res->num_lro_flows_supported / max;
+ info->max_tx_vlan_keys = res->num_txvlan_keys;
+ info->max_local_ipv6_addrs = res->max_local_ipv6_addrs;
+ info->max_remote_ipv6_addrs = res->max_remote_ipv6_addrs;
+
+ ret = qlcnic_sriov_pf_set_vport_info(adapter, info, vpid);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void qlcnic_sriov_pf_set_ff_max_res(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *info)
+{
+ struct qlcnic_resources *ff_max = &adapter->ahw->sriov->ff_max;
+
+ ff_max->num_tx_mac_filters = info->max_tx_mac_filters;
+ ff_max->num_rx_ucast_mac_filters = info->max_rx_ucast_mac_filters;
+ ff_max->num_rx_mcast_mac_filters = info->max_rx_mcast_mac_filters;
+ ff_max->num_txvlan_keys = info->max_tx_vlan_keys;
+ ff_max->num_rx_queues = info->max_rx_ques;
+ ff_max->num_tx_queues = info->max_tx_ques;
+ ff_max->num_lro_flows_supported = info->max_rx_lro_flow;
+ ff_max->num_destip = info->max_rx_ip_addr;
+ ff_max->num_rx_buf_rings = info->max_rx_buf_rings;
+ ff_max->num_rx_status_rings = info->max_rx_status_rings;
+ ff_max->max_remote_ipv6_addrs = info->max_remote_ipv6_addrs;
+ ff_max->max_local_ipv6_addrs = info->max_local_ipv6_addrs;
+}
+
+static void qlcnic_sriov_set_vf_max_vlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ int temp, total_fn;
+
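+ /* Derive the per-VF guest VLAN limit from the multicast MAC filter
+ * pool shared across the PF and all VFs.
+ */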
+ temp = npar_info->max_rx_mcast_mac_filters;
+ total_fn = sriov->num_vfs + 1;
+
+ temp = temp / (QLCNIC_SRIOV_VF_MAX_MAC * total_fn);
+ sriov->num_allowed_vlans = temp - 1;
+
+ if (qlcnic_83xx_pf_check(adapter))
+ sriov->num_allowed_vlans = 1;
+
+ netdev_info(adapter->netdev, "Max Guest VLANs supported per VF = %d\n",
+ sriov->num_allowed_vlans);
+}
+
+static int qlcnic_sriov_get_pf_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info)
+{
+ int err;
+ struct qlcnic_cmd_args cmd;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO))
+ return -ENOMEM;
+
+ cmd.req.arg[1] = 0x2;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get PF info, err=%d\n", err);
+ goto out;
+ }
+
+ npar_info->total_pf = cmd.rsp.arg[2] & 0xff;
+ npar_info->total_rss_engines = (cmd.rsp.arg[2] >> 8) & 0xff;
+ npar_info->max_vports = MSW(cmd.rsp.arg[2]);
+ npar_info->max_tx_ques = LSW(cmd.rsp.arg[3]);
+ npar_info->max_tx_mac_filters = MSW(cmd.rsp.arg[3]);
+ npar_info->max_rx_mcast_mac_filters = LSW(cmd.rsp.arg[4]);
+ npar_info->max_rx_ucast_mac_filters = MSW(cmd.rsp.arg[4]);
+ npar_info->max_rx_ip_addr = LSW(cmd.rsp.arg[5]);
+ npar_info->max_rx_lro_flow = MSW(cmd.rsp.arg[5]);
+ npar_info->max_rx_status_rings = LSW(cmd.rsp.arg[6]);
+ npar_info->max_rx_buf_rings = MSW(cmd.rsp.arg[6]);
+ npar_info->max_rx_ques = LSW(cmd.rsp.arg[7]);
+ npar_info->max_tx_vlan_keys = MSW(cmd.rsp.arg[7]);
+ npar_info->max_local_ipv6_addrs = LSW(cmd.rsp.arg[8]);
+ npar_info->max_remote_ipv6_addrs = MSW(cmd.rsp.arg[8]);
+
+ qlcnic_sriov_set_vf_max_vlan(adapter, npar_info);
+ qlcnic_sriov_pf_set_ff_max_res(adapter, npar_info);
+ dev_info(&adapter->pdev->dev,
+ "\n\ttotal_pf: %d,\n"
+ "\n\ttotal_rss_engines: %d max_vports: %d max_tx_ques %d,\n"
+ "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
+ "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
+ "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
+ "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
+ "\tmax_local_ipv6_addrs: %d, max_remote_ipv6_addrs: %d\n",
+ npar_info->total_pf, npar_info->total_rss_engines,
+ npar_info->max_vports, npar_info->max_tx_ques,
+ npar_info->max_tx_mac_filters,
+ npar_info->max_rx_mcast_mac_filters,
+ npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr,
+ npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings,
+ npar_info->max_rx_buf_rings, npar_info->max_rx_ques,
+ npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs,
+ npar_info->max_remote_ipv6_addrs);
+
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static void qlcnic_sriov_pf_reset_vport_handle(struct qlcnic_adapter *adapter,
+ u8 func)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vport *vp;
+ int index;
+
+ if (adapter->ahw->pci_func == func) {
+ sriov->vp_handle = 0;
+ } else {
+ index = qlcnic_sriov_func_to_index(adapter, func);
+ if (index < 0)
+ return;
+ vp = sriov->vf_info[index].vp;
+ vp->handle = 0;
+ }
+}
+
+static void qlcnic_sriov_pf_set_vport_handle(struct qlcnic_adapter *adapter,
+ u16 vport_handle, u8 func)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vport *vp;
+ int index;
+
+ if (adapter->ahw->pci_func == func) {
+ sriov->vp_handle = vport_handle;
+ } else {
+ index = qlcnic_sriov_func_to_index(adapter, func);
+ if (index < 0)
+ return;
+ vp = sriov->vf_info[index].vp;
+ vp->handle = vport_handle;
+ }
+}
+
+static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *adapter,
+ u8 func)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf_info;
+ int index;
+
+ if (adapter->ahw->pci_func == func) {
+ return sriov->vp_handle;
+ } else {
+ index = qlcnic_sriov_func_to_index(adapter, func);
+ if (index >= 0) {
+ vf_info = &sriov->vf_info[index];
+ return vf_info->vp->handle;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int qlcnic_sriov_pf_config_vport(struct qlcnic_adapter *adapter,
+ u8 flag, u16 func)
+{
+ struct qlcnic_cmd_args cmd;
+ int ret;
+ int vpid;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_VPORT))
+ return -ENOMEM;
+
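+ /* A non-zero flag creates a vport for func; a zero flag tears down
+ * the vport identified by its existing handle.
+ */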
+ if (flag) {
+ cmd.req.arg[3] = func << 8;
+ } else {
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
+ if (vpid < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+ cmd.req.arg[3] = ((vpid & 0xffff) << 8) | 1;
+ }
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "Failed %s vport, err %d for func 0x%x\n",
+ (flag ? "enable" : "disable"), ret, func);
+ goto out;
+ }
+
+ if (flag) {
+ vpid = cmd.rsp.arg[2] & 0xffff;
+ qlcnic_sriov_pf_set_vport_handle(adapter, vpid, func);
+ } else {
+ qlcnic_sriov_pf_reset_vport_handle(adapter, func);
+ }
+
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return ret;
+}
+
+static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter,
+ u8 enable)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = 0x4;
+ if (enable) {
+ adapter->flags |= QLCNIC_VLAN_FILTERING;
+ cmd.req.arg[1] |= BIT_16;
+ if (qlcnic_84xx_check(adapter))
+ cmd.req.arg[1] |= QLC_SRIOV_ALLOW_VLAN0;
+ } else {
+ adapter->flags &= ~QLCNIC_VLAN_FILTERING;
+ }
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to configure VLAN filtering, err=%d\n", err);
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+/* When the VF flood bit is configured, the PF driver receives traffic from all VFs */
+static int qlcnic_sriov_pf_cfg_flood(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = QLC_FLOOD_MODE | QLC_VF_FLOOD_BIT;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to configure VF Flood bit on PF, err=%d\n",
+ err);
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_sriov_pf_cfg_eswitch(struct qlcnic_adapter *adapter,
+ u8 func, u8 enable)
+{
+ struct qlcnic_cmd_args cmd;
+ int err = -EIO;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TOGGLE_ESWITCH))
+ return -ENOMEM;
+
+ cmd.req.arg[0] |= (3 << 29);
+ cmd.req.arg[1] = ((func & 0xf) << 2) | BIT_6 | BIT_1;
+ if (enable)
+ cmd.req.arg[1] |= BIT_0;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to %s SR-IOV eswitch, err=%d\n",
+ (enable ? "enable" : "disable"), err);
+ err = -EIO;
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static void qlcnic_sriov_pf_del_flr_queue(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_back_channel *bc = &sriov->bc;
+ int i;
+
+ for (i = 0; i < sriov->num_vfs; i++)
+ cancel_work_sync(&sriov->vf_info[i].flr_work);
+
+ destroy_workqueue(bc->bc_flr_wq);
+}
+
+static int qlcnic_sriov_pf_create_flr_queue(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
+ struct workqueue_struct *wq;
+
+ wq = create_singlethread_workqueue("qlcnic-flr");
+ if (wq == NULL) {
+ dev_err(&adapter->pdev->dev, "Cannot create FLR workqueue\n");
+ return -ENOMEM;
+ }
+
+ bc->bc_flr_wq = wq;
+ return 0;
+}
+
+void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *adapter)
+{
+ u8 func = adapter->ahw->pci_func;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return;
+
+ qlcnic_sriov_pf_del_flr_queue(adapter);
+ qlcnic_sriov_cfg_bc_intr(adapter, 0);
+ qlcnic_sriov_pf_config_vport(adapter, 0, func);
+ qlcnic_sriov_pf_cfg_eswitch(adapter, func, 0);
+ qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 0);
+ __qlcnic_sriov_cleanup(adapter);
+ adapter->ahw->op_mode = QLCNIC_MGMT_FUNC;
+ clear_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
+}
+
+void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter)
+{
+ if (!qlcnic_sriov_pf_check(adapter))
+ return;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return;
+
+ pci_disable_sriov(adapter->pdev);
+ netdev_info(adapter->netdev,
+ "SR-IOV is disabled successfully on port %d\n",
+ adapter->portnum);
+}
+
+static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (pci_vfs_assigned(adapter->pdev)) {
+ netdev_err(adapter->netdev,
+ "SR-IOV VFs belonging to port %d are assigned to VMs. SR-IOV can not be disabled on this port\n",
+ adapter->portnum);
+ netdev_info(adapter->netdev,
+ "Please detach SR-IOV VFs belonging to port %d from VMs, and then try to disable SR-IOV on this port\n",
+ adapter->portnum);
+ return -EPERM;
+ }
+
+ qlcnic_sriov_pf_disable(adapter);
+
+ rtnl_lock();
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ qlcnic_sriov_free_vlans(adapter);
+
+ qlcnic_sriov_pf_cleanup(adapter);
+
+ /* After disabling SR-IOV, re-initialize the driver in default mode
+ * and configure the opmode based on the function's op_mode.
+ */
+ if (qlcnic_83xx_configure_opmode(adapter)) {
+ rtnl_unlock();
+ return -EIO;
+ }
+
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+
+ rtnl_unlock();
+ return 0;
+}
+
+static int qlcnic_sriov_pf_init(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_info nic_info, pf_info, vp_info;
+ int err;
+ u8 func = ahw->pci_func;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return 0;
+
+ err = qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 1);
+ if (err)
+ return err;
+
+ if (qlcnic_84xx_check(adapter)) {
+ err = qlcnic_sriov_pf_cfg_flood(adapter);
+ if (err)
+ goto disable_vlan_filtering;
+ }
+
+ err = qlcnic_sriov_pf_cfg_eswitch(adapter, func, 1);
+ if (err)
+ goto disable_vlan_filtering;
+
+ err = qlcnic_sriov_pf_config_vport(adapter, 1, func);
+ if (err)
+ goto disable_eswitch;
+
+ err = qlcnic_sriov_get_pf_info(adapter, &pf_info);
+ if (err)
+ goto delete_vport;
+
+ err = qlcnic_get_nic_info(adapter, &nic_info, func);
+ if (err)
+ goto delete_vport;
+
+ err = qlcnic_sriov_pf_cal_res_limit(adapter, &vp_info, func);
+ if (err)
+ goto delete_vport;
+
+ err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
+ if (err)
+ goto delete_vport;
+
+ ahw->physical_port = (u8) nic_info.phys_port;
+ ahw->switch_mode = nic_info.switch_mode;
+ ahw->max_mtu = nic_info.max_mtu;
+ ahw->capabilities = nic_info.capabilities;
+ ahw->nic_mode = QLC_83XX_SRIOV_MODE;
+ return err;
+
+delete_vport:
+ qlcnic_sriov_pf_config_vport(adapter, 0, func);
+
+disable_eswitch:
+ qlcnic_sriov_pf_cfg_eswitch(adapter, func, 0);
+
+disable_vlan_filtering:
+ qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 0);
+
+ return err;
+}
+
+static int qlcnic_sriov_pf_enable(struct qlcnic_adapter *adapter, int num_vfs)
+{
+ int err;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return 0;
+
+ err = pci_enable_sriov(adapter->pdev, num_vfs);
+ if (err)
+ qlcnic_sriov_pf_cleanup(adapter);
+
+ return err;
+}
+
+static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
+ int num_vfs)
+{
+ int err = 0;
+
+ set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
+ adapter->ahw->op_mode = QLCNIC_SRIOV_PF_FUNC;
+
+ err = qlcnic_sriov_init(adapter, num_vfs);
+ if (err)
+ goto clear_op_mode;
+
+ err = qlcnic_sriov_pf_create_flr_queue(adapter);
+ if (err)
+ goto sriov_cleanup;
+
+ err = qlcnic_sriov_pf_init(adapter);
+ if (err)
+ goto del_flr_queue;
+
+ qlcnic_sriov_alloc_vlans(adapter);
+
+ return err;
+
+del_flr_queue:
+ qlcnic_sriov_pf_del_flr_queue(adapter);
+
+sriov_cleanup:
+ __qlcnic_sriov_cleanup(adapter);
+
+clear_op_mode:
+ clear_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
+ adapter->ahw->op_mode = QLCNIC_MGMT_FUNC;
+ return err;
+}
+
+static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ netdev_err(netdev,
+ "SR-IOV cannot be enabled when legacy interrupts are enabled\n");
+ return -EIO;
+ }
+
+ rtnl_lock();
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ err = __qlcnic_pci_sriov_enable(adapter, num_vfs);
+ if (err)
+ goto error;
+
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+
+ rtnl_unlock();
+ err = qlcnic_sriov_pf_enable(adapter, num_vfs);
+ if (!err) {
+ netdev_info(netdev,
+ "SR-IOV is enabled successfully on port %d\n",
+ adapter->portnum);
+ /* Return number of vfs enabled */
+ return num_vfs;
+ }
+
+ rtnl_lock();
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+error:
+ if (!qlcnic_83xx_configure_opmode(adapter)) {
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+ }
+
+ rtnl_unlock();
+ netdev_info(netdev, "Failed to enable SR-IOV on port %d\n",
+ adapter->portnum);
+
+ return err;
+}
+
+int qlcnic_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(dev);
+ int err;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ if (num_vfs == 0)
+ err = qlcnic_pci_sriov_disable(adapter);
+ else
+ err = qlcnic_pci_sriov_enable(adapter, num_vfs);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return err;
+}
+
+static int qlcnic_sriov_set_vf_acl(struct qlcnic_adapter *adapter, u8 func)
+{
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_vport *vp;
+ int err, id;
+ u8 *mac;
+
+ id = qlcnic_sriov_func_to_index(adapter, func);
+ if (id < 0)
+ return id;
+
+ vp = adapter->ahw->sriov->vf_info[id].vp;
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = 0x3 | func << 16;
+ if (vp->spoofchk) {
+ mac = vp->mac;
+ cmd.req.arg[2] |= BIT_1 | BIT_3 | BIT_8;
+ cmd.req.arg[4] = mac[5] | mac[4] << 8 | mac[3] << 16 |
+ mac[2] << 24;
+ cmd.req.arg[5] = mac[1] | mac[0] << 8;
+ }
+
+ if (vp->vlan_mode == QLC_PVID_MODE) {
+ cmd.req.arg[2] |= BIT_6;
+ cmd.req.arg[3] |= vp->pvid << 8;
+ }
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev, "Failed to set ACL, err=%d\n",
+ err);
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_sriov_set_vf_vport_info(struct qlcnic_adapter *adapter,
+ u16 func)
+{
+ struct qlcnic_info defvp_info;
+ int err;
+
+ err = qlcnic_sriov_pf_cal_res_limit(adapter, &defvp_info, func);
+ if (err)
+ return -EIO;
+
+ err = qlcnic_sriov_set_vf_acl(adapter, func);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_vport *vp = vf->vp;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_sriov *sriov;
+ u16 func = vf->pci_func;
+ size_t size;
+ int err;
+
+ adapter = vf->adapter;
+ sriov = adapter->ahw->sriov;
+
+ if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) {
+ err = qlcnic_sriov_pf_config_vport(adapter, 1, func);
+ if (!err) {
+ err = qlcnic_sriov_set_vf_vport_info(adapter, func);
+ if (err)
+ qlcnic_sriov_pf_config_vport(adapter, 0, func);
+ }
+ } else {
+ if (vp->vlan_mode == QLC_GUEST_VLAN_MODE) {
+ size = sizeof(*vf->sriov_vlans);
+ size = size * sriov->num_allowed_vlans;
+ memset(vf->sriov_vlans, 0, size);
+ }
+
+ err = qlcnic_sriov_pf_config_vport(adapter, 0, func);
+ }
+
+ if (err)
+ goto err_out;
+
+ cmd->rsp.arg[0] |= (1 << 25);
+
+ if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
+ set_bit(QLC_BC_VF_STATE, &vf->state);
+ else
+ clear_bit(QLC_BC_VF_STATE, &vf->state);
+
+ return err;
+
+err_out:
+ cmd->rsp.arg[0] |= (2 << 25);
+ return err;
+}
+
+static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ u16 vlan, u8 op)
+{
+ struct qlcnic_cmd_args *cmd;
+ struct qlcnic_macvlan_mbx mv;
+ struct qlcnic_vport *vp;
+ u8 *addr;
+ int err;
+ u32 *buf;
+ int vpid;
+
+ vp = vf->vp;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!cmd)
+ return -ENOMEM;
+
+ err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
+ if (err)
+ goto free_cmd;
+
+ cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
+ if (vpid < 0) {
+ err = -EINVAL;
+ goto free_args;
+ }
+
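+ /* A non-zero VLAN turns a plain MAC add/delete into its
+ * VLAN-qualified counterpart.
+ */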
+ if (vlan)
+ op = ((op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
+ QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL);
+
+ cmd->req.arg[1] = op | (1 << 8) | (3 << 6);
+ cmd->req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31;
+
+ addr = vp->mac;
+ mv.vlan = vlan;
+ mv.mac_addr0 = addr[0];
+ mv.mac_addr1 = addr[1];
+ mv.mac_addr2 = addr[2];
+ mv.mac_addr3 = addr[3];
+ mv.mac_addr4 = addr[4];
+ mv.mac_addr5 = addr[5];
+ buf = &cmd->req.arg[2];
+ memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ if (!err)
+ return err;
+
+free_args:
+ qlcnic_free_mbx_args(cmd);
+free_cmd:
+ kfree(cmd);
+ return err;
+}
+
+static int qlcnic_sriov_validate_create_rx_ctx(struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[0] >> 29) != 0x3)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ int opcode)
+{
+ struct qlcnic_sriov *sriov;
+ u16 vlan;
+ int i;
+
+ sriov = adapter->ahw->sriov;
+
+ spin_lock_bh(&vf->vlan_list_lock);
+ if (vf->num_vlan) {
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ vlan = vf->sriov_vlans[i];
+ if (vlan)
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan,
+ opcode);
+ }
+ }
+ spin_unlock_bh(&vf->vlan_list_lock);
+
+ if (vf->vp->vlan_mode != QLC_PVID_MODE) {
+ if (qlcnic_83xx_pf_check(adapter) &&
+ qlcnic_sriov_check_any_vlan(vf))
+ return;
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0, opcode);
+ }
+}
+
+static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = tran->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_rcv_mbx_out *mbx_out;
+ int err;
+
+ err = qlcnic_sriov_validate_create_rx_ctx(cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ cmd->req.arg[6] = vf->vp->handle;
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ if (!err) {
+ mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd->rsp.arg[1];
+ vf->rx_ctx_id = mbx_out->ctx_id;
+ qlcnic_83xx_cfg_default_mac_vlan(adapter, vf, QLCNIC_MAC_ADD);
+ } else {
+ vf->rx_ctx_id = 0;
+ }
+
+ return err;
+}
+
+static int qlcnic_sriov_pf_mac_address_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ u8 type, *mac;
+
+ type = cmd->req.arg[1];
+ switch (type) {
+ case QLCNIC_SET_STATION_MAC:
+ case QLCNIC_SET_FAC_DEF_MAC:
+ cmd->rsp.arg[0] = (2 << 25);
+ break;
+ case QLCNIC_GET_CURRENT_MAC:
+ cmd->rsp.arg[0] = (1 << 25);
+ mac = vf->vp->mac;
+ cmd->rsp.arg[2] = mac[1] | ((mac[0] << 8) & 0xff00);
+ cmd->rsp.arg[1] = mac[5] | ((mac[4] << 8) & 0xff00) |
+ ((mac[3]) << 16 & 0xff0000) |
+ ((mac[2]) << 24 & 0xff000000);
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_validate_create_tx_ctx(struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[0] >> 29) != 0x3)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_create_tx_ctx_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_tx_mbx_out *mbx_out;
+ int err;
+
+ err = qlcnic_sriov_validate_create_tx_ctx(cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ cmd->req.arg[5] |= vf->vp->handle << 16;
+ err = qlcnic_issue_cmd(adapter, cmd);
+ if (!err) {
+ mbx_out = (struct qlcnic_tx_mbx_out *)&cmd->rsp.arg[2];
+ vf->tx_ctx_id = mbx_out->ctx_id;
+ } else {
+ vf->tx_ctx_id = 0;
+ }
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_del_rx_ctx(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[0] >> 29) != 0x3)
+ return -EINVAL;
+
+ if ((cmd->req.arg[1] & 0xffff) != vf->rx_ctx_id)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_del_rx_ctx_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_del_rx_ctx(vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ qlcnic_83xx_cfg_default_mac_vlan(adapter, vf, QLCNIC_MAC_DEL);
+ cmd->req.arg[1] |= vf->vp->handle << 16;
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ if (!err)
+ vf->rx_ctx_id = 0;
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_del_tx_ctx(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[0] >> 29) != 0x3)
+ return -EINVAL;
+
+ if ((cmd->req.arg[1] & 0xffff) != vf->tx_ctx_id)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_del_tx_ctx_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_del_tx_ctx(vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ cmd->req.arg[1] |= vf->vp->handle << 16;
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ if (!err)
+ vf->tx_ctx_id = 0;
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_cfg_lro(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_cfg_lro_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_cfg_lro(vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_pf_cfg_ip_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err = -EIO;
+
+ cmd->req.arg[1] |= vf->vp->handle << 16;
+ cmd->req.arg[1] |= BIT_31;
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_validate_cfg_intrpt(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (((cmd->req.arg[1] >> 8) & 0xff) != vf->pci_func)
+ return -EINVAL;
+
+ if (!(cmd->req.arg[1] & BIT_16))
+ return -EINVAL;
+
+ if ((cmd->req.arg[1] & 0xff) != 0x1)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_cfg_intrpt_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_cfg_intrpt(vf, cmd);
+ if (err)
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ else
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_mtu(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (cmd->req.arg[1] != vf->rx_ctx_id)
+ return -EINVAL;
+
+ if (cmd->req.arg[2] > adapter->ahw->max_mtu)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_set_mtu_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_mtu(adapter, vf, cmd);
+ if (err)
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ else
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_get_nic_info(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (cmd->req.arg[1] & BIT_31) {
+ if (((cmd->req.arg[1] >> 16) & 0x7fff) != vf->pci_func)
+ return -EINVAL;
+ } else {
+ cmd->req.arg[1] |= vf->vp->handle << 16;
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_get_nic_info_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_get_nic_info(vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_validate_cfg_rss(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (cmd->req.arg[1] != vf->rx_ctx_id)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_cfg_rss_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_cfg_rss(vf, cmd);
+ if (err)
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ else
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_cfg_intrcoal(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+ u16 ctx_id, pkts, time;
+ int err = -EINVAL;
+ u8 type;
+
+ type = cmd->req.arg[1] & QLC_INTR_COAL_TYPE_MASK;
+ ctx_id = cmd->req.arg[1] >> 16;
+ pkts = cmd->req.arg[2] & 0xffff;
+ time = cmd->req.arg[2] >> 16;
+
+ switch (type) {
+ case QLCNIC_INTR_COAL_TYPE_RX:
+ if (ctx_id != vf->rx_ctx_id || pkts > coal->rx_packets ||
+ time < coal->rx_time_us)
+ goto err_label;
+ break;
+ case QLCNIC_INTR_COAL_TYPE_TX:
+ if (ctx_id != vf->tx_ctx_id || pkts > coal->tx_packets ||
+ time < coal->tx_time_us)
+ goto err_label;
+ break;
+ default:
+ netdev_err(adapter->netdev, "Invalid coalescing type 0x%x received\n",
+ type);
+ return err;
+ }
+
+ return 0;
+
+err_label:
+ netdev_err(adapter->netdev, "Expected: rx_ctx_id 0x%x rx_packets 0x%x rx_time_us 0x%x tx_ctx_id 0x%x tx_packets 0x%x tx_time_us 0x%x\n",
+ vf->rx_ctx_id, coal->rx_packets, coal->rx_time_us,
+ vf->tx_ctx_id, coal->tx_packets, coal->tx_time_us);
+ netdev_err(adapter->netdev, "Received: ctx_id 0x%x packets 0x%x time_us 0x%x type 0x%x\n",
+ ctx_id, pkts, time, type);
+
+ return err;
+}
+
+static int qlcnic_sriov_pf_cfg_intrcoal_cmd(struct qlcnic_bc_trans *tran,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = tran->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_cfg_intrcoal(adapter, vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vport *vp = vf->vp;
+ u8 op, new_op;
+
+ if (!(cmd->req.arg[1] & BIT_8))
+ return -EINVAL;
+
+ cmd->req.arg[1] |= (vf->vp->handle << 16);
+ cmd->req.arg[1] |= BIT_31;
+
+ if (vp->vlan_mode == QLC_PVID_MODE) {
+ op = cmd->req.arg[1] & 0x7;
+ cmd->req.arg[1] &= ~0x7;
+ new_op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
+ QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
+ cmd->req.arg[3] |= vp->pvid << 16;
+ cmd->req.arg[1] |= new_op;
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_cfg_macvlan_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_cfg_macvlan(adapter, vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_validate_linkevent(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_linkevent_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_linkevent(vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_pf_cfg_promisc_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ cmd->req.arg[1] |= vf->vp->handle << 16;
+ cmd->req.arg[1] |= BIT_31;
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_vport *vp = vf->vp;
+ u8 cmd_op, mode = vp->vlan_mode;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_sriov *sriov;
+
+ adapter = vf->adapter;
+ sriov = adapter->ahw->sriov;
+
+ cmd_op = trans->req_hdr->cmd_op;
+ cmd->rsp.arg[0] |= 1 << 25;
+
+ /* For 84xx adapters in PVID mode, the PF driver should report the
+ * VLAN mode to the VF driver as QLC_NO_VLAN_MODE, which is zero in
+ * the mailbox response.
+ */
+ if (qlcnic_84xx_check(adapter) && mode == QLC_PVID_MODE)
+ return 0;
+
+ switch (mode) {
+ case QLC_GUEST_VLAN_MODE:
+ cmd->rsp.arg[1] = mode | 1 << 8;
+ cmd->rsp.arg[2] = sriov->num_allowed_vlans << 16;
+ break;
+ case QLC_PVID_MODE:
+ cmd->rsp.arg[1] = mode | 1 << 8 | vp->pvid << 16;
+ break;
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_del_guest_vlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ u16 vlan;
+
+ if (!qlcnic_sriov_check_any_vlan(vf))
+ return -EINVAL;
+
+ vlan = cmd->req.arg[1] >> 16;
+ if (!vf->rx_ctx_id) {
+ qlcnic_sriov_del_vlan_id(sriov, vf, vlan);
+ return 0;
+ }
+
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan, QLCNIC_MAC_DEL);
+ qlcnic_sriov_del_vlan_id(sriov, vf, vlan);
+
+ if (qlcnic_83xx_pf_check(adapter))
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf,
+ 0, QLCNIC_MAC_ADD);
+ return 0;
+}
+
+static int qlcnic_sriov_pf_add_guest_vlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ int err = -EIO;
+ u16 vlan;
+
+ if (qlcnic_83xx_pf_check(adapter) && qlcnic_sriov_check_any_vlan(vf))
+ return err;
+
+ vlan = cmd->req.arg[1] >> 16;
+
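+ /* Without an RX context the MAC filters cannot be programmed yet;
+ * just record the VLAN and let the context-create path apply it.
+ */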
+ if (!vf->rx_ctx_id) {
+ qlcnic_sriov_add_vlan_id(sriov, vf, vlan);
+ return 0;
+ }
+
+ if (qlcnic_83xx_pf_check(adapter)) {
+ err = qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0,
+ QLCNIC_MAC_DEL);
+ if (err)
+ return err;
+ }
+
+ err = qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan, QLCNIC_MAC_ADD);
+
+ if (err) {
+ if (qlcnic_83xx_pf_check(adapter))
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0,
+ QLCNIC_MAC_ADD);
+ return err;
+ }
+
+ qlcnic_sriov_add_vlan_id(sriov, vf, vlan);
+ return err;
+}
+
+static int qlcnic_sriov_pf_cfg_guest_vlan_cmd(struct qlcnic_bc_trans *tran,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = tran->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_vport *vp = vf->vp;
+ int err = -EIO;
+ u8 op;
+
+ if (vp->vlan_mode != QLC_GUEST_VLAN_MODE) {
+ cmd->rsp.arg[0] |= 2 << 25;
+ return err;
+ }
+
+ op = cmd->req.arg[1] & 0xf;
+
+ if (op)
+ err = qlcnic_sriov_pf_add_guest_vlan(adapter, vf, cmd);
+ else
+ err = qlcnic_sriov_pf_del_guest_vlan(adapter, vf, cmd);
+
+ cmd->rsp.arg[0] |= err ? 2 << 25 : 1 << 25;
+ return err;
+}
+
+static const int qlcnic_pf_passthru_supp_cmds[] = {
+ QLCNIC_CMD_GET_STATISTICS,
+ QLCNIC_CMD_GET_PORT_CONFIG,
+ QLCNIC_CMD_GET_LINK_STATUS,
+ QLCNIC_CMD_INIT_NIC_FUNC,
+ QLCNIC_CMD_STOP_NIC_FUNC,
+};
+
+static const struct qlcnic_sriov_cmd_handler qlcnic_pf_bc_cmd_hdlr[] = {
+ [QLCNIC_BC_CMD_CHANNEL_INIT] = {&qlcnic_sriov_pf_channel_cfg_cmd},
+ [QLCNIC_BC_CMD_CHANNEL_TERM] = {&qlcnic_sriov_pf_channel_cfg_cmd},
+ [QLCNIC_BC_CMD_GET_ACL] = {&qlcnic_sriov_pf_get_acl_cmd},
+ [QLCNIC_BC_CMD_CFG_GUEST_VLAN] = {&qlcnic_sriov_pf_cfg_guest_vlan_cmd},
+};
+
+static const struct qlcnic_sriov_fw_cmd_handler qlcnic_pf_fw_cmd_hdlr[] = {
+ {QLCNIC_CMD_CREATE_RX_CTX, qlcnic_sriov_pf_create_rx_ctx_cmd},
+ {QLCNIC_CMD_CREATE_TX_CTX, qlcnic_sriov_pf_create_tx_ctx_cmd},
+ {QLCNIC_CMD_MAC_ADDRESS, qlcnic_sriov_pf_mac_address_cmd},
+ {QLCNIC_CMD_DESTROY_RX_CTX, qlcnic_sriov_pf_del_rx_ctx_cmd},
+ {QLCNIC_CMD_DESTROY_TX_CTX, qlcnic_sriov_pf_del_tx_ctx_cmd},
+ {QLCNIC_CMD_CONFIGURE_HW_LRO, qlcnic_sriov_pf_cfg_lro_cmd},
+ {QLCNIC_CMD_CONFIGURE_IP_ADDR, qlcnic_sriov_pf_cfg_ip_cmd},
+ {QLCNIC_CMD_CONFIG_INTRPT, qlcnic_sriov_pf_cfg_intrpt_cmd},
+ {QLCNIC_CMD_SET_MTU, qlcnic_sriov_pf_set_mtu_cmd},
+ {QLCNIC_CMD_GET_NIC_INFO, qlcnic_sriov_pf_get_nic_info_cmd},
+ {QLCNIC_CMD_CONFIGURE_RSS, qlcnic_sriov_pf_cfg_rss_cmd},
+ {QLCNIC_CMD_CONFIG_INTR_COAL, qlcnic_sriov_pf_cfg_intrcoal_cmd},
+ {QLCNIC_CMD_CONFIG_MAC_VLAN, qlcnic_sriov_pf_cfg_macvlan_cmd},
+ {QLCNIC_CMD_GET_LINK_EVENT, qlcnic_sriov_pf_linkevent_cmd},
+ {QLCNIC_CMD_CONFIGURE_MAC_RX_MODE, qlcnic_sriov_pf_cfg_promisc_cmd},
+};
+
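+/* Dispatch a mailbox request received from a VF over the backchannel:
+ * backchannel control commands are indexed directly by opcode, firmware
+ * commands are matched against the handler table, and commands on the
+ * passthrough list are forwarded to the firmware unchanged; anything else
+ * is failed with response code 0x9.
+ */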
+void qlcnic_sriov_pf_process_bc_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ u8 size, cmd_op;
+
+ cmd_op = trans->req_hdr->cmd_op;
+
+ if (trans->req_hdr->op_type == QLC_BC_CMD) {
+ size = ARRAY_SIZE(qlcnic_pf_bc_cmd_hdlr);
+ if (cmd_op < size) {
+ qlcnic_pf_bc_cmd_hdlr[cmd_op].fn(trans, cmd);
+ return;
+ }
+ } else {
+ int i;
+ size = ARRAY_SIZE(qlcnic_pf_fw_cmd_hdlr);
+ for (i = 0; i < size; i++) {
+ if (cmd_op == qlcnic_pf_fw_cmd_hdlr[i].cmd) {
+ qlcnic_pf_fw_cmd_hdlr[i].fn(trans, cmd);
+ return;
+ }
+ }
+
+ size = ARRAY_SIZE(qlcnic_pf_passthru_supp_cmds);
+ for (i = 0; i < size; i++) {
+ if (cmd_op == qlcnic_pf_passthru_supp_cmds[i]) {
+ qlcnic_issue_cmd(adapter, cmd);
+ return;
+ }
+ }
+ }
+
+ cmd->rsp.arg[0] |= (0x9 << 25);
+}
+
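+/* The helpers below encode the PF's own vport handle into the interface
+ * id field of context and filter configuration requests issued by the PF.
+ */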
+void qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= vpid;
+}
+
+void qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= vpid << 16;
+}
+
+void qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ int vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= vpid << 16;
+}
+
+void qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= vpid << 16;
+}
+
+void qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= (vpid << 16) | BIT_31;
+}
+
+void qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= (vpid << 16) | BIT_31;
+}
+
+void qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= (vpid << 16) | BIT_31;
+}
+
+static void qlcnic_sriov_del_rx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_cmd_args cmd;
+ int vpid;
+
+ if (!vf->rx_ctx_id)
+ return;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX))
+ return;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
+ if (vpid >= 0) {
+ cmd.req.arg[1] = vf->rx_ctx_id | (vpid & 0xffff) << 16;
+ if (qlcnic_issue_cmd(adapter, &cmd))
+ dev_err(&adapter->pdev->dev,
+				"Failed to delete Rx ctx in firmware for func 0x%x\n",
+ vf->pci_func);
+ else
+ vf->rx_ctx_id = 0;
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+}
+
+static void qlcnic_sriov_del_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_cmd_args cmd;
+ int vpid;
+
+ if (!vf->tx_ctx_id)
+ return;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX))
+ return;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
+ if (vpid >= 0) {
+ cmd.req.arg[1] |= vf->tx_ctx_id | (vpid & 0xffff) << 16;
+ if (qlcnic_issue_cmd(adapter, &cmd))
+ dev_err(&adapter->pdev->dev,
+ "Failed to delete Tx ctx in firmware for func 0x%x\n",
+ vf->pci_func);
+ else
+ vf->tx_ctx_id = 0;
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+}
+
+static int qlcnic_sriov_add_act_list_irqsave(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_bc_trans *trans)
+{
+ struct qlcnic_trans_list *t_list = &vf->rcv_act;
+ unsigned long flag;
+
+ spin_lock_irqsave(&t_list->lock, flag);
+
+ __qlcnic_sriov_add_act_list(sriov, vf, trans);
+
+ spin_unlock_irqrestore(&t_list->lock, flag);
+ return 0;
+}
+
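+/* Flush the VF's pending and in-progress backchannel transactions, tear
+ * down its Tx and Rx contexts when the FLR was triggered in software, and
+ * clear its vport configuration before clearing the FLR state.
+ */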
+static void __qlcnic_sriov_process_flr(struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_adapter *adapter = vf->adapter;
+
+ qlcnic_sriov_cleanup_list(&vf->rcv_pend);
+ cancel_work_sync(&vf->trans_work);
+ qlcnic_sriov_cleanup_list(&vf->rcv_act);
+
+ if (test_bit(QLC_BC_VF_SOFT_FLR, &vf->state)) {
+ qlcnic_sriov_del_tx_ctx(adapter, vf);
+ qlcnic_sriov_del_rx_ctx(adapter, vf);
+ }
+
+ qlcnic_sriov_pf_config_vport(adapter, 0, vf->pci_func);
+
+ clear_bit(QLC_BC_VF_FLR, &vf->state);
+ if (test_bit(QLC_BC_VF_SOFT_FLR, &vf->state)) {
+ qlcnic_sriov_add_act_list_irqsave(adapter->ahw->sriov, vf,
+ vf->flr_trans);
+ clear_bit(QLC_BC_VF_SOFT_FLR, &vf->state);
+ vf->flr_trans = NULL;
+ }
+}
+
+static void qlcnic_sriov_pf_process_flr(struct work_struct *work)
+{
+ struct qlcnic_vf_info *vf;
+
+ vf = container_of(work, struct qlcnic_vf_info, flr_work);
+ __qlcnic_sriov_process_flr(vf);
+}
+
+static void qlcnic_sriov_schedule_flr(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf,
+ work_func_t func)
+{
+ if (test_bit(__QLCNIC_RESETTING, &vf->adapter->state))
+ return;
+
+ INIT_WORK(&vf->flr_work, func);
+ queue_work(sriov->bc.bc_flr_wq, &vf->flr_work);
+}
+
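+/* A CHANNEL_INIT request from a VF that is still marked active means the
+ * VF driver restarted without a hardware FLR; emulate the FLR in software
+ * and service the request once the cleanup has run.
+ */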
+static void qlcnic_sriov_handle_soft_flr(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+
+ set_bit(QLC_BC_VF_FLR, &vf->state);
+ clear_bit(QLC_BC_VF_STATE, &vf->state);
+ set_bit(QLC_BC_VF_SOFT_FLR, &vf->state);
+ vf->flr_trans = trans;
+ qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr);
+ netdev_info(adapter->netdev, "Software FLR for PCI func %d\n",
+ vf->pci_func);
+}
+
+bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_bc_hdr *hdr = trans->req_hdr;
+
+ if ((hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) &&
+ (hdr->op_type == QLC_BC_CMD) &&
+ test_bit(QLC_BC_VF_STATE, &vf->state)) {
+ qlcnic_sriov_handle_soft_flr(adapter, trans, vf);
+ return true;
+ }
+
+ return false;
+}
+
+void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf)
+{
+ struct net_device *dev = vf->adapter->netdev;
+ struct qlcnic_vport *vp = vf->vp;
+
+ if (!test_and_clear_bit(QLC_BC_VF_STATE, &vf->state)) {
+ clear_bit(QLC_BC_VF_FLR, &vf->state);
+ return;
+ }
+
+ if (test_and_set_bit(QLC_BC_VF_FLR, &vf->state)) {
+ netdev_info(dev, "FLR for PCI func %d in progress\n",
+ vf->pci_func);
+ return;
+ }
+
+ if (vp->vlan_mode == QLC_GUEST_VLAN_MODE)
+ memset(vf->sriov_vlans, 0,
+ sizeof(*vf->sriov_vlans) * sriov->num_allowed_vlans);
+
+ qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr);
+ netdev_info(dev, "FLR received for PCI func %d\n", vf->pci_func);
+}
+
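+/* Adapter reset: finish any outstanding FLR work for each VF, drop their
+ * context ids and re-enable the mailbox interrupt for the PF.
+ */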
+void qlcnic_sriov_pf_reset(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_sriov *sriov = ahw->sriov;
+ struct qlcnic_vf_info *vf;
+ u16 num_vfs = sriov->num_vfs;
+ int i;
+
+ for (i = 0; i < num_vfs; i++) {
+ vf = &sriov->vf_info[i];
+ vf->rx_ctx_id = 0;
+ vf->tx_ctx_id = 0;
+ cancel_work_sync(&vf->flr_work);
+ __qlcnic_sriov_process_flr(vf);
+ clear_bit(QLC_BC_VF_STATE, &vf->state);
+ }
+
+ qlcnic_sriov_pf_reset_vport_handle(adapter, ahw->pci_func);
+ QLCWRX(ahw, QLCNIC_MBX_INTR_ENBL, (ahw->num_msix - 1) << 8);
+}
+
+int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return 0;
+
+ ahw->op_mode = QLCNIC_SRIOV_PF_FUNC;
+
+ err = qlcnic_sriov_pf_init(adapter);
+ if (err)
+ return err;
+
+ dev_info(&adapter->pdev->dev, "%s: op_mode %d\n",
+ __func__, ahw->op_mode);
+ return err;
+}
+
+int qlcnic_sriov_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ int i, num_vfs;
+ struct qlcnic_vf_info *vf_info;
+ u8 *curr_mac;
+
+ if (!qlcnic_sriov_pf_check(adapter))
+ return -EOPNOTSUPP;
+
+ num_vfs = sriov->num_vfs;
+
+ if (!is_valid_ether_addr(mac) || vf >= num_vfs)
+ return -EINVAL;
+
+ if (ether_addr_equal(adapter->mac_addr, mac)) {
+ netdev_err(netdev, "MAC address is already in use by the PF\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_vfs; i++) {
+ vf_info = &sriov->vf_info[i];
+ if (ether_addr_equal(vf_info->vp->mac, mac)) {
+ netdev_err(netdev,
+ "MAC address is already in use by VF %d\n",
+ i);
+ return -EINVAL;
+ }
+ }
+
+ vf_info = &sriov->vf_info[vf];
+ curr_mac = vf_info->vp->mac;
+
+ if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
+ netdev_err(netdev,
+ "MAC address change failed for VF %d, as VF driver is loaded. Please unload VF driver and retry the operation\n",
+ vf);
+ return -EOPNOTSUPP;
+ }
+
+ memcpy(curr_mac, mac, netdev->addr_len);
+ netdev_info(netdev, "MAC Address %pM is configured for VF %d\n",
+ mac, vf);
+ return 0;
+}
+
+int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf,
+ int min_tx_rate, int max_tx_rate)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf_info;
+ struct qlcnic_info nic_info;
+ struct qlcnic_vport *vp;
+ u16 vpid;
+
+ if (!qlcnic_sriov_pf_check(adapter))
+ return -EOPNOTSUPP;
+
+ if (vf >= sriov->num_vfs)
+ return -EINVAL;
+
+ vf_info = &sriov->vf_info[vf];
+ vp = vf_info->vp;
+ vpid = vp->handle;
+
+ if (!min_tx_rate)
+ min_tx_rate = QLC_VF_MIN_TX_RATE;
+
+ if (max_tx_rate &&
+ (max_tx_rate >= 10000 || max_tx_rate < min_tx_rate)) {
+ netdev_err(netdev,
+			   "Invalid max Tx rate, allowed range is [%d - %d]\n",
+ min_tx_rate, QLC_VF_MAX_TX_RATE);
+ return -EINVAL;
+ }
+
+ if (!max_tx_rate)
+ max_tx_rate = 10000;
+
+ if (min_tx_rate &&
+ (min_tx_rate > max_tx_rate || min_tx_rate < QLC_VF_MIN_TX_RATE)) {
+ netdev_err(netdev,
+			   "Invalid min Tx rate, allowed range is [%d - %d]\n",
+ QLC_VF_MIN_TX_RATE, max_tx_rate);
+ return -EINVAL;
+ }
+
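+	/* Tx bandwidth is programmed as a percentage of the PF bandwidth,
+	 * hence the division of the Mbps rates by 100.
+	 */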
+ if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
+ if (qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, vpid))
+ return -EIO;
+
+ nic_info.max_tx_bw = max_tx_rate / 100;
+ nic_info.min_tx_bw = min_tx_rate / 100;
+ nic_info.bit_offsets = BIT_0;
+
+ if (qlcnic_sriov_pf_set_vport_info(adapter, &nic_info, vpid))
+ return -EIO;
+ }
+
+ vp->max_tx_bw = max_tx_rate / 100;
+ netdev_info(netdev,
+ "Setting Max Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n",
+ max_tx_rate, vp->max_tx_bw, vf);
+ vp->min_tx_bw = min_tx_rate / 100;
+ netdev_info(netdev,
+ "Setting Min Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n",
+ min_tx_rate, vp->min_tx_bw, vf);
+ return 0;
+}
+
+int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
+ u16 vlan, u8 qos)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf_info;
+ struct qlcnic_vport *vp;
+
+ if (!qlcnic_sriov_pf_check(adapter))
+ return -EOPNOTSUPP;
+
+ if (vf >= sriov->num_vfs || qos > 7)
+ return -EINVAL;
+
+ if (vlan > MAX_VLAN_ID) {
+ netdev_err(netdev,
+ "Invalid VLAN ID, allowed range is [0 - %d]\n",
+ MAX_VLAN_ID);
+ return -EINVAL;
+ }
+
+ vf_info = &sriov->vf_info[vf];
+ vp = vf_info->vp;
+ if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
+ netdev_err(netdev,
+ "VLAN change failed for VF %d, as VF driver is loaded. Please unload VF driver and retry the operation\n",
+ vf);
+ return -EOPNOTSUPP;
+ }
+
+ memset(vf_info->sriov_vlans, 0,
+ sizeof(*vf_info->sriov_vlans) * sriov->num_allowed_vlans);
+
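+	/* VLAN 4095 selects guest VLAN mode, VLAN 0 clears the VLAN
+	 * configuration, and any other value programs a port VLAN (PVID)
+	 * with the requested QoS priority.
+	 */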
+ switch (vlan) {
+ case 4095:
+ vp->vlan_mode = QLC_GUEST_VLAN_MODE;
+ break;
+ case 0:
+ vp->vlan_mode = QLC_NO_VLAN_MODE;
+ vp->qos = 0;
+ break;
+ default:
+ vp->vlan_mode = QLC_PVID_MODE;
+ qlcnic_sriov_add_vlan_id(sriov, vf_info, vlan);
+ vp->qos = qos;
+ vp->pvid = vlan;
+ }
+
+ netdev_info(netdev, "Setting VLAN %d, QoS %d, for VF %d\n",
+ vlan, qos, vf);
+ return 0;
+}
+
+static __u32 qlcnic_sriov_get_vf_vlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_vport *vp, int vf)
+{
+ __u32 vlan = 0;
+
+ switch (vp->vlan_mode) {
+ case QLC_PVID_MODE:
+ vlan = vp->pvid;
+ break;
+ case QLC_GUEST_VLAN_MODE:
+ vlan = MAX_VLAN_ID;
+ break;
+ case QLC_NO_VLAN_MODE:
+ vlan = 0;
+ break;
+ default:
+ netdev_info(adapter->netdev, "Invalid VLAN mode = %d for VF %d\n",
+ vp->vlan_mode, vf);
+ }
+
+ return vlan;
+}
+
+int qlcnic_sriov_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vport *vp;
+
+ if (!qlcnic_sriov_pf_check(adapter))
+ return -EOPNOTSUPP;
+
+ if (vf >= sriov->num_vfs)
+ return -EINVAL;
+
+ vp = sriov->vf_info[vf].vp;
+ memcpy(&ivi->mac, vp->mac, ETH_ALEN);
+ ivi->vlan = qlcnic_sriov_get_vf_vlan(adapter, vp, vf);
+ ivi->qos = vp->qos;
+ ivi->spoofchk = vp->spoofchk;
+ if (vp->max_tx_bw == MAX_BW)
+ ivi->max_tx_rate = 0;
+ else
+ ivi->max_tx_rate = vp->max_tx_bw * 100;
+ if (vp->min_tx_bw == MIN_BW)
+ ivi->min_tx_rate = 0;
+ else
+ ivi->min_tx_rate = vp->min_tx_bw * 100;
+
+ ivi->vf = vf;
+ return 0;
+}
+
+int qlcnic_sriov_set_vf_spoofchk(struct net_device *netdev, int vf, bool chk)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf_info;
+ struct qlcnic_vport *vp;
+
+ if (!qlcnic_sriov_pf_check(adapter))
+ return -EOPNOTSUPP;
+
+ if (vf >= sriov->num_vfs)
+ return -EINVAL;
+
+ vf_info = &sriov->vf_info[vf];
+ vp = vf_info->vp;
+ if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
+ netdev_err(netdev,
+ "Spoof check change failed for VF %d, as VF driver is loaded. Please unload VF driver and retry the operation\n",
+ vf);
+ return -EOPNOTSUPP;
+ }
+
+ vp->spoofchk = chk;
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 341d37c867f..f5786d5792d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -1,8 +1,15 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
#include <linux/slab.h>
-#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include "qlcnic.h"
+#include "qlcnic_hw.h"
#include <linux/swab.h>
#include <linux/dma-mapping.h>
@@ -12,6 +19,12 @@
#include <linux/sysfs.h>
#include <linux/aer.h>
#include <linux/log2.h>
+#ifdef CONFIG_QLCNIC_HWMON
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#endif
+
+#define QLC_STATUS_UNSUPPORTED_CMD -2
int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
{
@@ -37,10 +50,10 @@ static ssize_t qlcnic_store_bridged_mode(struct device *dev,
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
goto err_out;
- if (strict_strtoul(buf, 2, &new))
+ if (kstrtoul(buf, 2, &new))
goto err_out;
- if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
+ if (!qlcnic_config_bridged_mode(adapter, !!new))
ret = len;
err_out:
@@ -67,7 +80,7 @@ static ssize_t qlcnic_store_diag_mode(struct device *dev,
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
unsigned long new;
- if (strict_strtoul(buf, 2, &new))
+ if (kstrtoul(buf, 2, &new))
return -EINVAL;
if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
@@ -80,9 +93,7 @@ static ssize_t qlcnic_show_diag_mode(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-
- return sprintf(buf, "%d\n",
- !!(adapter->flags & QLCNIC_DIAG_ENABLED));
+ return sprintf(buf, "%d\n", !!(adapter->flags & QLCNIC_DIAG_ENABLED));
}
static int qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon,
@@ -106,22 +117,54 @@ static int qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon,
return 0;
}
-static ssize_t qlcnic_store_beacon(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+static int qlcnic_83xx_store_beacon(struct qlcnic_adapter *adapter,
+ const char *buf, size_t len)
{
- struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- int max_sds_rings = adapter->max_sds_rings;
- u16 beacon;
- u8 b_state, b_rate;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ unsigned long h_beacon;
int err;
- if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
- dev_warn(dev,
- "LED test not supported in non privileged mode\n");
- return -EOPNOTSUPP;
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EIO;
+
+ if (kstrtoul(buf, 2, &h_beacon))
+ return -EINVAL;
+
+ qlcnic_get_beacon_state(adapter);
+
+ if (ahw->beacon_state == h_beacon)
+ return len;
+
+ rtnl_lock();
+ if (!ahw->beacon_state) {
+ if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) {
+ rtnl_unlock();
+ return -EBUSY;
+ }
}
+ if (h_beacon)
+ err = qlcnic_83xx_config_led(adapter, 1, h_beacon);
+ else
+ err = qlcnic_83xx_config_led(adapter, 0, !h_beacon);
+ if (!err)
+ ahw->beacon_state = h_beacon;
+
+ if (!ahw->beacon_state)
+ clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+
+ rtnl_unlock();
+ return len;
+}
+
+static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
+ const char *buf, size_t len)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err, drv_sds_rings = adapter->drv_sds_rings;
+ u16 beacon;
+ u8 b_state, b_rate;
+
if (len != sizeof(u16))
return QL_STATUS_INVALID_PARAM;
@@ -130,16 +173,18 @@ static ssize_t qlcnic_store_beacon(struct device *dev,
if (err)
return err;
- if (adapter->ahw->beacon_state == b_state)
+ qlcnic_get_beacon_state(adapter);
+
+ if (ahw->beacon_state == b_state)
return len;
rtnl_lock();
-
- if (!adapter->ahw->beacon_state)
+ if (!ahw->beacon_state) {
if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) {
rtnl_unlock();
return -EBUSY;
}
+ }
if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
err = -EIO;
@@ -154,23 +199,45 @@ static ssize_t qlcnic_store_beacon(struct device *dev,
}
err = qlcnic_config_led(adapter, b_state, b_rate);
-
if (!err) {
err = len;
- adapter->ahw->beacon_state = b_state;
+ ahw->beacon_state = b_state;
}
if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
- qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
+ qlcnic_diag_free_res(adapter->netdev, drv_sds_rings);
- out:
- if (!adapter->ahw->beacon_state)
+out:
+ if (!ahw->beacon_state)
clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
rtnl_unlock();
return err;
}
+static ssize_t qlcnic_store_beacon(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int err = 0;
+
+ if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ dev_warn(dev,
+ "LED test not supported in non privileged mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (qlcnic_82xx_check(adapter))
+ err = qlcnic_82xx_store_beacon(adapter, buf, len);
+ else if (qlcnic_83xx_check(adapter))
+ err = qlcnic_83xx_store_beacon(adapter, buf, len);
+ else
+ return -EIO;
+
+ return err;
+}
+
static ssize_t qlcnic_show_beacon(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -207,21 +274,13 @@ static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 data;
- u64 qmdata;
int ret;
ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
if (ret != 0)
return ret;
+ qlcnic_read_crb(adapter, buf, offset, size);
- if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
- qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
- memcpy(buf, &qmdata, size);
- } else {
- data = QLCRD32(adapter, offset);
- memcpy(buf, &data, size);
- }
return size;
}
@@ -231,21 +290,13 @@ static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 data;
- u64 qmdata;
int ret;
ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
if (ret != 0)
return ret;
- if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
- memcpy(&qmdata, buf, size);
- qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
- } else {
- memcpy(&data, buf, size);
- QLCWR32(adapter, offset, data);
- }
+ qlcnic_write_crb(adapter, buf, offset, size);
return size;
}
@@ -303,33 +354,45 @@ static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
return size;
}
+int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
+{
+ int i;
+
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
+ if (adapter->npars[i].pci_func == pci_func)
+ return i;
+ }
+
+ dev_err(&adapter->pdev->dev, "%s: Invalid nic function\n", __func__);
+ return -EINVAL;
+}
+
static int validate_pm_config(struct qlcnic_adapter *adapter,
struct qlcnic_pm_func_cfg *pm_cfg, int count)
{
- u8 src_pci_func, s_esw_id, d_esw_id, dest_pci_func;
- int i;
+ u8 src_pci_func, s_esw_id, d_esw_id;
+ u8 dest_pci_func;
+ int i, src_index, dest_index;
for (i = 0; i < count; i++) {
src_pci_func = pm_cfg[i].pci_func;
dest_pci_func = pm_cfg[i].dest_npar;
- if (src_pci_func >= QLCNIC_MAX_PCI_FUNC ||
- dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
- return QL_STATUS_INVALID_PARAM;
-
- if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
+ src_index = qlcnic_is_valid_nic_func(adapter, src_pci_func);
+ if (src_index < 0)
return QL_STATUS_INVALID_PARAM;
- if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
+ dest_index = qlcnic_is_valid_nic_func(adapter, dest_pci_func);
+ if (dest_index < 0)
return QL_STATUS_INVALID_PARAM;
- s_esw_id = adapter->npars[src_pci_func].phy_port;
- d_esw_id = adapter->npars[dest_pci_func].phy_port;
+ s_esw_id = adapter->npars[src_index].phy_port;
+ d_esw_id = adapter->npars[dest_index].phy_port;
if (s_esw_id != d_esw_id)
return QL_STATUS_INVALID_PARAM;
}
- return 0;
+ return 0;
}
static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
@@ -342,7 +405,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_pm_func_cfg *pm_cfg;
u32 id, action, pci_func;
- int count, rem, i, ret;
+ int count, rem, i, ret, index;
count = size / sizeof(struct qlcnic_pm_func_cfg);
rem = size % sizeof(struct qlcnic_pm_func_cfg);
@@ -350,26 +413,34 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
return QL_STATUS_INVALID_PARAM;
pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
-
ret = validate_pm_config(adapter, pm_cfg, count);
+
if (ret)
return ret;
for (i = 0; i < count; i++) {
pci_func = pm_cfg[i].pci_func;
action = !!pm_cfg[i].action;
- id = adapter->npars[pci_func].phy_port;
- ret = qlcnic_config_port_mirroring(adapter, id, action,
- pci_func);
+ index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return QL_STATUS_INVALID_PARAM;
+
+ id = adapter->npars[index].phy_port;
+ ret = qlcnic_config_port_mirroring(adapter, id,
+ action, pci_func);
if (ret)
return ret;
}
for (i = 0; i < count; i++) {
pci_func = pm_cfg[i].pci_func;
- id = adapter->npars[pci_func].phy_port;
- adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
- adapter->npars[pci_func].dest_npar = id;
+ index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return QL_STATUS_INVALID_PARAM;
+ id = adapter->npars[index].phy_port;
+ adapter->npars[index].enable_pm = !!pm_cfg[i].action;
+ adapter->npars[index].dest_npar = id;
}
+
return size;
}
@@ -381,47 +452,64 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
+ struct qlcnic_pm_func_cfg *pm_cfg;
+ u8 pci_func;
+ u32 count;
int i;
- if (size != sizeof(pm_cfg))
- return QL_STATUS_INVALID_PARAM;
-
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
- if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
+ memset(buf, 0, size);
+ pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
+ count = size / sizeof(struct qlcnic_pm_func_cfg);
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
+ pci_func = adapter->npars[i].pci_func;
+ if (pci_func >= count) {
+ dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
+ __func__, adapter->ahw->total_nic_func, count);
+ continue;
+ }
+ if (!adapter->npars[i].eswitch_status)
continue;
- pm_cfg[i].action = adapter->npars[i].enable_pm;
- pm_cfg[i].dest_npar = 0;
- pm_cfg[i].pci_func = i;
- }
- memcpy(buf, &pm_cfg, size);
+ pm_cfg[pci_func].action = adapter->npars[i].enable_pm;
+ pm_cfg[pci_func].dest_npar = 0;
+ pm_cfg[pci_func].pci_func = i;
+ }
return size;
}
static int validate_esw_config(struct qlcnic_adapter *adapter,
struct qlcnic_esw_func_cfg *esw_cfg, int count)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int i, ret;
u32 op_mode;
u8 pci_func;
- int i;
- op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
+ if (qlcnic_82xx_check(adapter))
+ op_mode = readl(ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
+ else
+ op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE);
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
- if (pci_func >= QLCNIC_MAX_PCI_FUNC)
+ if (pci_func >= ahw->max_vnic_func)
return QL_STATUS_INVALID_PARAM;
- if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) {
- if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
+ if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
+ if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
return QL_STATUS_INVALID_PARAM;
- }
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
- if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
- QLCNIC_NON_PRIV_FUNC) {
+ if (qlcnic_82xx_check(adapter)) {
+ ret = QLC_DEV_GET_DRV(op_mode, pci_func);
+ } else {
+ ret = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode,
+ pci_func);
+ esw_cfg[i].offload_flags = 0;
+ }
+
+ if (ret != QLCNIC_NON_PRIV_FUNC) {
if (esw_cfg[i].mac_anti_spoof != 0)
return QL_STATUS_INVALID_PARAM;
if (esw_cfg[i].mac_override != 1)
@@ -444,6 +532,7 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
return QL_STATUS_INVALID_PARAM;
}
}
+
return 0;
}
@@ -458,7 +547,8 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
struct qlcnic_esw_func_cfg *esw_cfg;
struct qlcnic_npar_info *npar;
int count, rem, i, ret;
- u8 pci_func, op_mode = 0;
+ int index;
+ u8 op_mode = 0, pci_func;
count = size / sizeof(struct qlcnic_esw_func_cfg);
rem = size % sizeof(struct qlcnic_esw_func_cfg);
@@ -471,10 +561,9 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
return ret;
for (i = 0; i < count; i++) {
- if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) {
+ if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
return QL_STATUS_INVALID_PARAM;
- }
if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
continue;
@@ -487,6 +576,9 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
+ rtnl_lock();
+ qlcnic_set_netdev_features(adapter, &esw_cfg[i]);
+ rtnl_unlock();
break;
case QLCNIC_ADD_VLAN:
qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
@@ -503,7 +595,10 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
- npar = &adapter->npars[pci_func];
+ index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return QL_STATUS_INVALID_PARAM;
+ npar = &adapter->npars[index];
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
npar->promisc_mode = esw_cfg[i].promisc_mode;
@@ -532,21 +627,28 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
- u8 i;
-
- if (size != sizeof(esw_cfg))
- return QL_STATUS_INVALID_PARAM;
+ struct qlcnic_esw_func_cfg *esw_cfg;
+ u8 pci_func;
+ u32 count;
+ int i;
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
- if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
+ memset(buf, 0, size);
+ esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
+ count = size / sizeof(struct qlcnic_esw_func_cfg);
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
+ pci_func = adapter->npars[i].pci_func;
+ if (pci_func >= count) {
+ dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
+ __func__, adapter->ahw->total_nic_func, count);
+ continue;
+ }
+ if (!adapter->npars[i].eswitch_status)
continue;
- esw_cfg[i].pci_func = i;
- if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
+
+ esw_cfg[pci_func].pci_func = pci_func;
+ if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func]))
return QL_STATUS_INVALID_PARAM;
}
- memcpy(buf, &esw_cfg, size);
-
return size;
}
@@ -558,10 +660,7 @@ static int validate_npar_config(struct qlcnic_adapter *adapter,
for (i = 0; i < count; i++) {
pci_func = np_cfg[i].pci_func;
- if (pci_func >= QLCNIC_MAX_PCI_FUNC)
- return QL_STATUS_INVALID_PARAM;
-
- if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
+ if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
return QL_STATUS_INVALID_PARAM;
if (!IS_VALID_BW(np_cfg[i].min_bw) ||
@@ -581,7 +680,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_info nic_info;
struct qlcnic_npar_func_cfg *np_cfg;
- int i, count, rem, ret;
+ int i, count, rem, ret, index;
u8 pci_func;
count = size / sizeof(struct qlcnic_npar_func_cfg);
@@ -594,8 +693,10 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
if (ret)
return ret;
- for (i = 0; i < count ; i++) {
+ for (i = 0; i < count; i++) {
pci_func = np_cfg[i].pci_func;
+
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
if (ret)
return ret;
@@ -605,12 +706,14 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
ret = qlcnic_set_nic_info(adapter, &nic_info);
if (ret)
return ret;
- adapter->npars[i].min_bw = nic_info.min_tx_bw;
- adapter->npars[i].max_bw = nic_info.max_tx_bw;
+ index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return QL_STATUS_INVALID_PARAM;
+ adapter->npars[index].min_bw = nic_info.min_tx_bw;
+ adapter->npars[index].max_bw = nic_info.max_tx_bw;
}
return size;
-
}
static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
@@ -621,30 +724,41 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_npar_func_cfg *np_cfg;
struct qlcnic_info nic_info;
- struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
+ u8 pci_func;
int i, ret;
+ u32 count;
- if (size != sizeof(np_cfg))
- return QL_STATUS_INVALID_PARAM;
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
+ memset(buf, 0, size);
+ np_cfg = (struct qlcnic_npar_func_cfg *)buf;
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
- if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
+ count = size / sizeof(struct qlcnic_npar_func_cfg);
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
+ if (adapter->npars[i].pci_func >= count) {
+ dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
+ __func__, adapter->ahw->total_nic_func, count);
continue;
- ret = qlcnic_get_nic_info(adapter, &nic_info, i);
+ }
+ if (!adapter->npars[i].eswitch_status)
+ continue;
+ pci_func = adapter->npars[i].pci_func;
+ if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
+ continue;
+ ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
if (ret)
return ret;
- np_cfg[i].pci_func = i;
- np_cfg[i].op_mode = (u8)nic_info.op_mode;
- np_cfg[i].port_num = nic_info.phys_port;
- np_cfg[i].fw_capab = nic_info.capabilities;
- np_cfg[i].min_bw = nic_info.min_tx_bw;
- np_cfg[i].max_bw = nic_info.max_tx_bw;
- np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
- np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
+ np_cfg[pci_func].pci_func = pci_func;
+ np_cfg[pci_func].op_mode = (u8)nic_info.op_mode;
+ np_cfg[pci_func].port_num = nic_info.phys_port;
+ np_cfg[pci_func].fw_capab = nic_info.capabilities;
+ np_cfg[pci_func].min_bw = nic_info.min_tx_bw;
+ np_cfg[pci_func].max_bw = nic_info.max_tx_bw;
+ np_cfg[pci_func].max_tx_queues = nic_info.max_tx_ques;
+ np_cfg[pci_func].max_rx_queues = nic_info.max_rx_ques;
}
- memcpy(buf, &np_cfg, size);
return size;
}
@@ -659,10 +773,13 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
struct qlcnic_esw_statistics port_stats;
int ret;
+ if (qlcnic_83xx_check(adapter))
+ return QLC_STATUS_UNSUPPORTED_CMD;
+
if (size != sizeof(struct qlcnic_esw_statistics))
return QL_STATUS_INVALID_PARAM;
- if (offset >= QLCNIC_MAX_PCI_FUNC)
+ if (offset >= adapter->ahw->max_vnic_func)
return QL_STATUS_INVALID_PARAM;
memset(&port_stats, 0, size);
@@ -691,6 +808,9 @@ static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
struct qlcnic_esw_statistics esw_stats;
int ret;
+ if (qlcnic_83xx_check(adapter))
+ return QLC_STATUS_UNSUPPORTED_CMD;
+
if (size != sizeof(struct qlcnic_esw_statistics))
return QL_STATUS_INVALID_PARAM;
@@ -722,6 +842,9 @@ static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
int ret;
+ if (qlcnic_83xx_check(adapter))
+ return QLC_STATUS_UNSUPPORTED_CMD;
+
if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
return QL_STATUS_INVALID_PARAM;
@@ -744,11 +867,15 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
char *buf, loff_t offset,
size_t size)
{
+
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
int ret;
- if (offset >= QLCNIC_MAX_PCI_FUNC)
+ if (qlcnic_83xx_check(adapter))
+ return QLC_STATUS_UNSUPPORTED_CMD;
+
+ if (offset >= adapter->ahw->max_vnic_func)
return QL_STATUS_INVALID_PARAM;
ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
@@ -772,14 +899,12 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
+ struct qlcnic_pci_func_cfg *pci_cfg;
struct qlcnic_pci_info *pci_info;
int i, ret;
+ u32 count;
- if (size != sizeof(pci_cfg))
- return QL_STATUS_INVALID_PARAM;
-
- pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+ pci_info = kcalloc(size, sizeof(*pci_info), GFP_KERNEL);
if (!pci_info)
return -ENOMEM;
@@ -789,19 +914,260 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
return ret;
}
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
+ pci_cfg = (struct qlcnic_pci_func_cfg *)buf;
+ count = size / sizeof(struct qlcnic_pci_func_cfg);
+ for (i = 0; i < count; i++) {
pci_cfg[i].pci_func = pci_info[i].id;
pci_cfg[i].func_type = pci_info[i].type;
+ pci_cfg[i].func_state = 0;
pci_cfg[i].port_num = pci_info[i].default_port;
pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
}
- memcpy(buf, &pci_cfg, size);
+
kfree(pci_info);
return size;
}
+static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ unsigned char *p_read_buf;
+ int ret, count;
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+ if (!size)
+ return QL_STATUS_INVALID_PARAM;
+ if (!buf)
+ return QL_STATUS_INVALID_PARAM;
+
+ count = size / sizeof(u32);
+
+ if (size % sizeof(u32))
+ count++;
+
+ p_read_buf = kcalloc(size, sizeof(unsigned char), GFP_KERNEL);
+ if (!p_read_buf)
+ return -ENOMEM;
+ if (qlcnic_83xx_lock_flash(adapter) != 0) {
+ kfree(p_read_buf);
+ return -EIO;
+ }
+
+ ret = qlcnic_83xx_lockless_flash_read32(adapter, offset, p_read_buf,
+ count);
+
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ kfree(p_read_buf);
+ return ret;
+ }
+
+ qlcnic_83xx_unlock_flash(adapter);
+ memcpy(buf, p_read_buf, size);
+ kfree(p_read_buf);
+
+ return size;
+}
+
+static int qlcnic_83xx_sysfs_flash_bulk_write(struct qlcnic_adapter *adapter,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ int i, ret, count;
+ unsigned char *p_cache, *p_src;
+
+ p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL);
+ if (!p_cache)
+ return -ENOMEM;
+
+ memcpy(p_cache, buf, size);
+ p_src = p_cache;
+ count = size / sizeof(u32);
+
+ if (qlcnic_83xx_lock_flash(adapter) != 0) {
+ kfree(p_cache);
+ return -EIO;
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_enable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
+ for (i = 0; i < count / QLC_83XX_FLASH_WRITE_MAX; i++) {
+ ret = qlcnic_83xx_flash_bulk_write(adapter, offset,
+ (u32 *)p_src,
+ QLC_83XX_FLASH_WRITE_MAX);
+
+ if (ret) {
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+		p_src = p_src + sizeof(u32) * QLC_83XX_FLASH_WRITE_MAX;
+		offset = offset + sizeof(u32) * QLC_83XX_FLASH_WRITE_MAX;
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_sysfs_flash_write(struct qlcnic_adapter *adapter,
+ char *buf, loff_t offset, size_t size)
+{
+ int i, ret, count;
+ unsigned char *p_cache, *p_src;
+
+ p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL);
+ if (!p_cache)
+ return -ENOMEM;
+
+ memcpy(p_cache, buf, size);
+ p_src = p_cache;
+ count = size / sizeof(u32);
+
+ if (qlcnic_83xx_lock_flash(adapter) != 0) {
+ kfree(p_cache);
+ return -EIO;
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_enable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
+ for (i = 0; i < count; i++) {
+ ret = qlcnic_83xx_flash_write32(adapter, offset, (u32 *)p_src);
+ if (ret) {
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ p_src = p_src + sizeof(u32);
+ offset = offset + sizeof(u32);
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
+static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ int ret;
+ static int flash_mode;
+ unsigned long data;
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+ if (!buf)
+ return QL_STATUS_INVALID_PARAM;
+
+ ret = kstrtoul(buf, 16, &data);
+
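+	/* A write of a recognized hex command selects the flash mode
+	 * (sector erase, bulk write or single-word write); any other
+	 * payload is treated as data and flashed according to the mode
+	 * selected by the previous command.
+	 */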
+ switch (data) {
+ case QLC_83XX_FLASH_SECTOR_ERASE_CMD:
+ flash_mode = QLC_83XX_ERASE_MODE;
+ ret = qlcnic_83xx_erase_flash_sector(adapter, offset);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+ break;
+
+ case QLC_83XX_FLASH_BULK_WRITE_CMD:
+ flash_mode = QLC_83XX_BULK_WRITE_MODE;
+ break;
+
+ case QLC_83XX_FLASH_WRITE_CMD:
+ flash_mode = QLC_83XX_WRITE_MODE;
+ break;
+ default:
+ if (flash_mode == QLC_83XX_BULK_WRITE_MODE) {
+ ret = qlcnic_83xx_sysfs_flash_bulk_write(adapter, buf,
+ offset, size);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s failed at %d\n",
+ __func__, __LINE__);
+ return -EIO;
+ }
+ }
+
+ if (flash_mode == QLC_83XX_WRITE_MODE) {
+ ret = qlcnic_83xx_sysfs_flash_write(adapter, buf,
+ offset, size);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s failed at %d\n", __func__,
+ __LINE__);
+ return -EIO;
+ }
+ }
+ }
+
+ return size;
+}
+
static struct device_attribute dev_attr_bridged_mode = {
.attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
.show = qlcnic_show_bridged_mode,
@@ -876,6 +1242,75 @@ static struct bin_attribute bin_attr_pm_config = {
.write = qlcnic_sysfs_write_pm_config,
};
+static struct bin_attribute bin_attr_flash = {
+ .attr = {.name = "flash", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_83xx_sysfs_flash_read_handler,
+ .write = qlcnic_83xx_sysfs_flash_write_handler,
+};
+
+#ifdef CONFIG_QLCNIC_HWMON
+
+static ssize_t qlcnic_hwmon_show_temp(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ unsigned int temperature = 0, value = 0;
+
+ if (qlcnic_83xx_check(adapter))
+ value = QLCRDX(adapter->ahw, QLC_83XX_ASIC_TEMP);
+ else if (qlcnic_82xx_check(adapter))
+ value = QLC_SHARED_REG_RD32(adapter, QLCNIC_ASIC_TEMP);
+
+ temperature = qlcnic_get_temp_val(value);
+	/* display temperature in millidegrees Celsius */
+ temperature *= 1000;
+ return sprintf(buf, "%u\n", temperature);
+}
+
+/* hwmon-sysfs attributes */
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
+ qlcnic_hwmon_show_temp, NULL, 1);
+
+static struct attribute *qlcnic_hwmon_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(qlcnic_hwmon);
+
+void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct device *hwmon_dev;
+
+ /* Skip hwmon registration for a VF device */
+ if (qlcnic_sriov_vf_check(adapter)) {
+ adapter->ahw->hwmon_dev = NULL;
+ return;
+ }
+ hwmon_dev = hwmon_device_register_with_groups(dev, qlcnic_driver_name,
+ adapter,
+ qlcnic_hwmon_groups);
+ if (IS_ERR(hwmon_dev)) {
+ dev_err(dev, "Cannot register with hwmon, err=%ld\n",
+ PTR_ERR(hwmon_dev));
+ hwmon_dev = NULL;
+ }
+ adapter->ahw->hwmon_dev = hwmon_dev;
+}
+
+void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+ struct device *hwmon_dev = adapter->ahw->hwmon_dev;
+ if (hwmon_dev) {
+ hwmon_device_unregister(hwmon_dev);
+ adapter->ahw->hwmon_dev = NULL;
+ }
+}
+#endif
+
void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
@@ -894,10 +1329,9 @@ void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
device_remove_file(dev, &dev_attr_bridged_mode);
}
-void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
+static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
- u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
if (device_create_bin_file(dev, &bin_attr_port_stats))
dev_info(dev, "failed to create port stats sysfs entry");
@@ -911,11 +1345,12 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
if (device_create_bin_file(dev, &bin_attr_mem))
dev_info(dev, "failed to create mem sysfs entry\n");
- if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state))
return;
if (device_create_bin_file(dev, &bin_attr_pci_config))
dev_info(dev, "failed to create pci config sysfs entry");
+
if (device_create_file(dev, &dev_attr_beacon))
dev_info(dev, "failed to create beacon sysfs entry");
@@ -933,10 +1368,9 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
dev_info(dev, "failed to create eswitch stats sysfs entry");
}
-void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
+static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
- u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
device_remove_bin_file(dev, &bin_attr_port_stats);
@@ -945,8 +1379,10 @@ void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
device_remove_file(dev, &dev_attr_diag_mode);
device_remove_bin_file(dev, &bin_attr_crb);
device_remove_bin_file(dev, &bin_attr_mem);
- if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
+
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state))
return;
+
device_remove_bin_file(dev, &bin_attr_pci_config);
device_remove_file(dev, &dev_attr_beacon);
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
@@ -958,3 +1394,31 @@ void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
device_remove_bin_file(dev, &bin_attr_pm_config);
device_remove_bin_file(dev, &bin_attr_esw_stats);
}
+
+void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter)
+{
+ qlcnic_create_diag_entries(adapter);
+}
+
+void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter)
+{
+ qlcnic_remove_diag_entries(adapter);
+}
+
+void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ qlcnic_create_diag_entries(adapter);
+
+ if (sysfs_create_bin_file(&dev->kobj, &bin_attr_flash))
+ dev_info(dev, "failed to create flash sysfs entry\n");
+}
+
+void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ qlcnic_remove_diag_entries(adapter);
+ sysfs_remove_bin_file(&dev->kobj, &bin_attr_flash);
+}
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index a131d7b5d2f..ef332708e5f 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,7 +18,7 @@
*/
#define DRV_NAME "qlge"
#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "v1.00.00.31"
+#define DRV_VERSION "1.00.00.34"
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
@@ -2149,7 +2149,7 @@ struct ql_adapter {
struct timer_list timer;
atomic_t lb_count;
/* Keep local copy of current mac address. */
- char current_mac_addr[6];
+ char current_mac_addr[ETH_ALEN];
};
/*
@@ -2206,14 +2206,14 @@ extern char qlge_driver_name[];
extern const char qlge_driver_version[];
extern const struct ethtool_ops qlge_ethtool_ops;
-extern int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
-extern void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
-extern int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
-extern int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
- u32 *value);
-extern int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
-extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
- u16 q_id);
+int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
+void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
+int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
+int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
+ u32 *value);
+int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
+int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
+ u16 q_id);
void ql_queue_fw_error(struct ql_adapter *qdev);
void ql_mpi_work(struct work_struct *work);
void ql_mpi_reset_work(struct work_struct *work);
@@ -2233,10 +2233,9 @@ int ql_unpause_mpi_risc(struct ql_adapter *qdev);
int ql_pause_mpi_risc(struct ql_adapter *qdev);
int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
-int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
- u32 ram_addr, int word_count);
-int ql_core_dump(struct ql_adapter *qdev,
- struct ql_mpi_coredump *mpi_coredump);
+int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, u32 ram_addr,
+ int word_count);
+int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump);
int ql_mb_about_fw(struct ql_adapter *qdev);
int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
@@ -2249,8 +2248,6 @@ int ql_mb_get_port_cfg(struct ql_adapter *qdev);
int ql_mb_set_port_cfg(struct ql_adapter *qdev);
int ql_wait_fifo_empty(struct ql_adapter *qdev);
void ql_get_dump(struct ql_adapter *qdev, void *buff);
-void ql_gen_reg_dump(struct ql_adapter *qdev,
- struct ql_reg_dump *mpi_coredump);
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
int ql_own_firmware(struct ql_adapter *qdev);
@@ -2264,9 +2261,9 @@ int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
/* #define QL_OB_DUMP */
#ifdef QL_REG_DUMP
-extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
-extern void ql_dump_routing_entries(struct ql_adapter *qdev);
-extern void ql_dump_regs(struct ql_adapter *qdev);
+void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
+void ql_dump_routing_entries(struct ql_adapter *qdev);
+void ql_dump_regs(struct ql_adapter *qdev);
#define QL_DUMP_REGS(qdev) ql_dump_regs(qdev)
#define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev)
#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev)
@@ -2277,26 +2274,26 @@ extern void ql_dump_regs(struct ql_adapter *qdev);
#endif
#ifdef QL_STAT_DUMP
-extern void ql_dump_stat(struct ql_adapter *qdev);
+void ql_dump_stat(struct ql_adapter *qdev);
#define QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
#else
#define QL_DUMP_STAT(qdev)
#endif
#ifdef QL_DEV_DUMP
-extern void ql_dump_qdev(struct ql_adapter *qdev);
+void ql_dump_qdev(struct ql_adapter *qdev);
#define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
#else
#define QL_DUMP_QDEV(qdev)
#endif
#ifdef QL_CB_DUMP
-extern void ql_dump_wqicb(struct wqicb *wqicb);
-extern void ql_dump_tx_ring(struct tx_ring *tx_ring);
-extern void ql_dump_ricb(struct ricb *ricb);
-extern void ql_dump_cqicb(struct cqicb *cqicb);
-extern void ql_dump_rx_ring(struct rx_ring *rx_ring);
-extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
+void ql_dump_wqicb(struct wqicb *wqicb);
+void ql_dump_tx_ring(struct tx_ring *tx_ring);
+void ql_dump_ricb(struct ricb *ricb);
+void ql_dump_cqicb(struct cqicb *cqicb);
+void ql_dump_rx_ring(struct rx_ring *rx_ring);
+void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
#define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb)
#define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb)
#define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
@@ -2314,9 +2311,9 @@ extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
#endif
#ifdef QL_OB_DUMP
-extern void ql_dump_tx_desc(struct tx_buf_desc *tbd);
-extern void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
-extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
+void ql_dump_tx_desc(struct tx_buf_desc *tbd);
+void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
+void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
#else
@@ -2325,14 +2322,14 @@ extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
#endif
#ifdef QL_IB_DUMP
-extern void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
+void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
#else
#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
#endif
#ifdef QL_ALL_DUMP
-extern void ql_dump_all(struct ql_adapter *qdev);
+void ql_dump_all(struct ql_adapter *qdev);
#define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
#else
#define QL_DUMP_ALL(qdev)
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index 10093f0c4c0..829be21f97b 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -740,8 +740,8 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
int i;
if (!mpi_coredump) {
- netif_err(qdev, drv, qdev->ndev, "No memory available\n");
- return -ENOMEM;
+ netif_err(qdev, drv, qdev->ndev, "No memory allocated\n");
+ return -EINVAL;
}
/* Try to get the spinlock, but dont worry if
@@ -1242,8 +1242,8 @@ static void ql_get_core_dump(struct ql_adapter *qdev)
ql_queue_fw_error(qdev);
}
-void ql_gen_reg_dump(struct ql_adapter *qdev,
- struct ql_reg_dump *mpi_coredump)
+static void ql_gen_reg_dump(struct ql_adapter *qdev,
+ struct ql_reg_dump *mpi_coredump)
{
int i, status;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index 6f316ab2325..c3c514e332b 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -1,5 +1,4 @@
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
@@ -181,6 +180,7 @@ static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
};
#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
#define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats)
+#define QLGE_RCV_MAC_ERR_STATS 7
static int ql_update_ring_coalescing(struct ql_adapter *qdev)
{
@@ -280,6 +280,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
iter++;
}
+ /* Update receive mac error statistics */
+ iter += QLGE_RCV_MAC_ERR_STATS;
+
/*
* Get Per-priority TX pause frame counter statistics.
*/
@@ -379,13 +382,13 @@ static int ql_get_settings(struct net_device *ndev,
ecmd->supported = SUPPORTED_10000baseT_Full;
ecmd->advertising = ADVERTISED_10000baseT_Full;
- ecmd->autoneg = AUTONEG_ENABLE;
ecmd->transceiver = XCVR_EXTERNAL;
if ((qdev->link_status & STS_LINK_TYPE_MASK) ==
STS_LINK_TYPE_10GBASET) {
ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
ecmd->port = PORT_TP;
+ ecmd->autoneg = AUTONEG_ENABLE;
} else {
ecmd->supported |= SUPPORTED_FIBRE;
ecmd->advertising |= ADVERTISED_FIBRE;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 3e73742024b..b40050e03a5 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -6,7 +6,6 @@
* Ron Mercer <ron.mercer@qlogic.com>
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
@@ -96,8 +95,10 @@ static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
-static int ql_wol(struct ql_adapter *qdev);
-static void qlge_set_multicast_list(struct net_device *ndev);
+static int ql_wol(struct ql_adapter *);
+static void qlge_set_multicast_list(struct net_device *);
+static int ql_adapter_down(struct ql_adapter *);
+static int ql_adapter_up(struct ql_adapter *);
/* This hardware semaphore causes exclusive access to
* resources shared between the NIC driver, MPI firmware,
@@ -409,7 +410,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
(qdev->
func << CAM_OUT_FUNC_SHIFT) |
(0 << CAM_OUT_CQ_ID_SHIFT));
- if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
+ if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
cam_output |= CAM_OUT_RV;
/* route to NIC core */
ql_write32(qdev, MAC_ADDR_DATA, cam_output);
@@ -1106,6 +1107,7 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
if (pci_dma_mapping_error(qdev->pdev, map)) {
__free_pages(rx_ring->pg_chunk.page,
qdev->lbq_buf_order);
+ rx_ring->pg_chunk.page = NULL;
netif_err(qdev, drv, qdev->ndev,
"PCI mapping failed.\n");
return -ENOMEM;
@@ -1211,8 +1213,6 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
netdev_alloc_skb(qdev->ndev,
SMALL_BUFFER_SIZE);
if (sbq_desc->p.skb == NULL) {
- netif_err(qdev, probe, qdev->ndev,
- "Couldn't get an skb.\n");
rx_ring->sbq_clean_idx = clean_idx;
return;
}
@@ -1434,11 +1434,13 @@ map_error:
}
/* Categorizing receive firmware frame errors */
-static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err)
+static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
+ struct rx_ring *rx_ring)
{
struct nic_stats *stats = &qdev->nic_stats;
stats->rx_err_count++;
+ rx_ring->rx_errors++;
switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
@@ -1463,6 +1465,29 @@ static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err)
}
}
+/**
+ * ql_update_mac_hdr_len - helper routine to update the mac header length
+ * based on vlan tags if present
+ */
+static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ void *page, size_t *len)
+{
+ u16 *tags;
+
+ if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ return;
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
+ tags = (u16 *)page;
+ /* Look for stacked vlan tags in ethertype field */
+ if (tags[6] == ETH_P_8021Q &&
+ tags[8] == ETH_P_8021Q)
+ *len += 2 * VLAN_HLEN;
+ else
+ *len += VLAN_HLEN;
+ }
+}
+
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
struct rx_ring *rx_ring,
@@ -1474,6 +1499,12 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
struct napi_struct *napi = &rx_ring->napi;
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+ put_page(lbq_desc->p.pg_chunk.page);
+ return;
+ }
napi->dev = qdev->ndev;
skb = napi_get_frags(napi);
@@ -1500,7 +1531,7 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rx_ring->cq_id);
if (vlan_id != 0xffff)
- __vlan_hwaccel_put_tag(skb, vlan_id);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
napi_gro_frags(napi);
}
@@ -1516,11 +1547,10 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
void *addr;
struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
struct napi_struct *napi = &rx_ring->napi;
+ size_t hlen = ETH_HLEN;
skb = netdev_alloc_skb(ndev, length);
if (!skb) {
- netif_err(qdev, drv, qdev->ndev,
- "Couldn't get an skb, need to unwind!.\n");
rx_ring->rx_dropped++;
put_page(lbq_desc->p.pg_chunk.page);
return;
@@ -1529,25 +1559,34 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
addr = lbq_desc->p.pg_chunk.va;
prefetch(addr);
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+ goto err_out;
+ }
+
+ /* Update the MAC header length */
+ ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
+
/* The max framesize filter on this chip is set higher than
* MTU since FCoE uses 2k frames.
*/
- if (skb->len > ndev->mtu + ETH_HLEN) {
+ if (skb->len > ndev->mtu + hlen) {
netif_err(qdev, drv, qdev->ndev,
"Segment too small, dropping.\n");
rx_ring->rx_dropped++;
goto err_out;
}
- memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
+ memcpy(skb_put(skb, hlen), addr, hlen);
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
length);
skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset+ETH_HLEN,
- length-ETH_HLEN);
- skb->len += length-ETH_HLEN;
- skb->data_len += length-ETH_HLEN;
- skb->truesize += length-ETH_HLEN;
+ lbq_desc->p.pg_chunk.offset + hlen,
+ length - hlen);
+ skb->len += length - hlen;
+ skb->data_len += length - hlen;
+ skb->truesize += length - hlen;
rx_ring->rx_packets++;
rx_ring->rx_bytes += skb->len;
@@ -1565,7 +1604,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
/* Unfragmented ipv4 UDP frame. */
struct iphdr *iph =
- (struct iphdr *) ((u8 *)addr + ETH_HLEN);
+ (struct iphdr *)((u8 *)addr + hlen);
if (!(iph->frag_off &
htons(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1578,7 +1617,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
skb_record_rx_queue(skb, rx_ring->cq_id);
if (vlan_id != 0xffff)
- __vlan_hwaccel_put_tag(skb, vlan_id);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
if (skb->ip_summed == CHECKSUM_UNNECESSARY)
napi_gro_receive(napi, skb);
else
@@ -1605,8 +1644,6 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
/* Allocate new_skb and copy */
new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
if (new_skb == NULL) {
- netif_err(qdev, probe, qdev->ndev,
- "No skb available, drop the packet.\n");
rx_ring->rx_dropped++;
return;
}
@@ -1614,6 +1651,13 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
memcpy(skb_put(new_skb, length), skb->data, length);
skb = new_skb;
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
/* loopback self test for ethtool */
if (test_bit(QL_SELFTEST, &qdev->flags)) {
ql_check_lb_frame(qdev, skb);
@@ -1676,7 +1720,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
skb_record_rx_queue(skb, rx_ring->cq_id);
if (vlan_id != 0xffff)
- __vlan_hwaccel_put_tag(skb, vlan_id);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
if (skb->ip_summed == CHECKSUM_UNNECESSARY)
napi_gro_receive(&rx_ring->napi, skb);
else
@@ -1710,7 +1754,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
struct bq_desc *sbq_desc;
struct sk_buff *skb = NULL;
u32 length = le32_to_cpu(ib_mac_rsp->data_len);
- u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+ u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+ size_t hlen = ETH_HLEN;
/*
* Handle the header buffer if present.
@@ -1837,9 +1882,10 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
skb->data_len += length;
skb->truesize += length;
length -= length;
- __pskb_pull_tail(skb,
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
- VLAN_ETH_HLEN : ETH_HLEN);
+ ql_update_mac_hdr_len(qdev, ib_mac_rsp,
+ lbq_desc->p.pg_chunk.va,
+ &hlen);
+ __pskb_pull_tail(skb, hlen);
}
} else {
/*
@@ -1894,8 +1940,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
length -= size;
i++;
}
- __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
- VLAN_ETH_HLEN : ETH_HLEN);
+ ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
+ &hlen);
+ __pskb_pull_tail(skb, hlen);
}
return skb;
}
@@ -1919,6 +1966,13 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
return;
}
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
/* The max framesize filter on this chip is set higher than
* MTU since FCoE uses 2k frames.
*/
@@ -1980,8 +2034,8 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
rx_ring->rx_packets++;
rx_ring->rx_bytes += skb->len;
skb_record_rx_queue(skb, rx_ring->cq_id);
- if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
- __vlan_hwaccel_put_tag(skb, vlan_id);
+ if (vlan_id != 0xffff)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
if (skb->ip_summed == CHECKSUM_UNNECESSARY)
napi_gro_receive(&rx_ring->napi, skb);
else
@@ -1994,18 +2048,13 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
struct ib_mac_iocb_rsp *ib_mac_rsp)
{
u32 length = le32_to_cpu(ib_mac_rsp->data_len);
- u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
+ u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
+ (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
((le16_to_cpu(ib_mac_rsp->vlan_id) &
IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- ql_categorize_rx_err(qdev, ib_mac_rsp->flags2);
- return (unsigned long)length;
- }
-
if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
/* The data and headers are split into
* separate buffers.
@@ -2285,7 +2334,7 @@ static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
{
struct ql_adapter *qdev = netdev_priv(ndev);
- if (features & NETIF_F_HW_VLAN_RX) {
+ if (features & NETIF_F_HW_VLAN_CTAG_RX) {
ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
NIC_RCV_CFG_VLAN_MATCH_AND_NON);
} else {
@@ -2293,17 +2342,44 @@ static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
}
}
+/**
+ * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
+ * based on the features to enable/disable hardware vlan accel
+ */
+static int qlge_update_hw_vlan_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ int status = 0;
+
+ status = ql_adapter_down(qdev);
+ if (status) {
+ netif_err(qdev, link, qdev->ndev,
+ "Failed to bring down the adapter\n");
+ return status;
+ }
+
+ /* update the features with the recent change */
+ ndev->features = features;
+
+ status = ql_adapter_up(qdev);
+ if (status) {
+ netif_err(qdev, link, qdev->ndev,
+ "Failed to bring up the adapter\n");
+ return status;
+ }
+ return status;
+}
+
static netdev_features_t qlge_fix_features(struct net_device *ndev,
netdev_features_t features)
{
- /*
- * Since there is no support for separate rx/tx vlan accel
- * enable/disable make sure tx flag is always in same state as rx.
- */
- if (features & NETIF_F_HW_VLAN_RX)
- features |= NETIF_F_HW_VLAN_TX;
- else
- features &= ~NETIF_F_HW_VLAN_TX;
+ int err;
+
+ /* Update the behavior of vlan accel in the adapter */
+ err = qlge_update_hw_vlan_features(ndev, features);
+ if (err)
+ return err;
return features;
}
@@ -2313,7 +2389,7 @@ static int qlge_set_features(struct net_device *ndev,
{
netdev_features_t changed = ndev->features ^ features;
- if (changed & NETIF_F_HW_VLAN_RX)
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX)
qlge_vlan_mode(ndev, features);
return 0;
@@ -2332,7 +2408,7 @@ static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
return err;
}
-static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
+static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
struct ql_adapter *qdev = netdev_priv(ndev);
int status;
@@ -2363,7 +2439,7 @@ static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
return err;
}
-static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
+static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
struct ql_adapter *qdev = netdev_priv(ndev);
int status;
@@ -2480,11 +2556,10 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
if (skb_is_gso(skb)) {
int err;
- if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (err)
- return err;
- }
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
@@ -2761,6 +2836,12 @@ static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
curr_idx = 0;
}
+ if (rx_ring->pg_chunk.page) {
+ pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
+ ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
+ put_page(rx_ring->pg_chunk.page);
+ rx_ring->pg_chunk.page = NULL;
+ }
}
static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
@@ -2920,14 +3001,11 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
/*
* Allocate small buffer queue control blocks.
*/
- rx_ring->sbq =
- kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
- GFP_KERNEL);
- if (rx_ring->sbq == NULL) {
- netif_err(qdev, ifup, qdev->ndev,
- "Small buffer queue control block allocation failed.\n");
+ rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
+ sizeof(struct bq_desc),
+ GFP_KERNEL);
+ if (rx_ring->sbq == NULL)
goto err_mem;
- }
ql_init_sbq_ring(qdev, rx_ring);
}
@@ -2948,14 +3026,11 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
/*
* Allocate large buffer queue control blocks.
*/
- rx_ring->lbq =
- kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
- GFP_KERNEL);
- if (rx_ring->lbq == NULL) {
- netif_err(qdev, ifup, qdev->ndev,
- "Large buffer queue control block allocation failed.\n");
+ rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
+ sizeof(struct bq_desc),
+ GFP_KERNEL);
+ if (rx_ring->lbq == NULL)
goto err_mem;
- }
ql_init_lbq_ring(qdev, rx_ring);
}
@@ -3255,24 +3330,16 @@ static void ql_enable_msix(struct ql_adapter *qdev)
for (i = 0; i < qdev->intr_count; i++)
qdev->msi_x_entry[i].entry = i;
- /* Loop to get our vectors. We start with
- * what we want and settle for what we get.
- */
- do {
- err = pci_enable_msix(qdev->pdev,
- qdev->msi_x_entry, qdev->intr_count);
- if (err > 0)
- qdev->intr_count = err;
- } while (err > 0);
-
+ err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
+ 1, qdev->intr_count);
if (err < 0) {
kfree(qdev->msi_x_entry);
qdev->msi_x_entry = NULL;
netif_warn(qdev, ifup, qdev->ndev,
"MSI-X Enable failed, trying MSI.\n");
- qdev->intr_count = 1;
qlge_irq_type = MSI_IRQ;
- } else if (err == 0) {
+ } else {
+ qdev->intr_count = err;
set_bit(QL_MSIX_ENABLED, &qdev->flags);
netif_info(qdev, ifup, qdev->ndev,
"MSI-X Enabled, got %d vectors.\n",
@@ -3528,7 +3595,7 @@ static int ql_request_irq(struct ql_adapter *qdev)
}
return status;
err_irq:
- netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!/n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
ql_free_irq(qdev);
return status;
}
@@ -3687,8 +3754,12 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
ql_write32(qdev, SYS, mask | value);
/* Set the default queue, and VLAN behavior. */
- value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
- mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
+ value = NIC_RCV_CFG_DFQ;
+ mask = NIC_RCV_CFG_DFQ_MASK;
+ if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ value |= NIC_RCV_CFG_RV;
+ mask |= (NIC_RCV_CFG_RV << 16);
+ }
ql_write32(qdev, NIC_RCV_CFG, (mask | value));
/* Set the MPI interrupt to enabled. */
@@ -4488,7 +4559,6 @@ static void ql_release_all(struct pci_dev *pdev)
iounmap(qdev->doorbell_area);
vfree(qdev->mpi_coredump);
pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
}
static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
@@ -4572,7 +4642,6 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
qdev->mpi_coredump =
vmalloc(sizeof(struct ql_mpi_coredump));
if (qdev->mpi_coredump == NULL) {
- dev_err(&pdev->dev, "Coredump alloc failed.\n");
err = -ENOMEM;
goto err_out2;
}
@@ -4586,7 +4655,6 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
goto err_out2;
}
- memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
/* Keep local copy of current mac address. */
memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
@@ -4677,12 +4745,20 @@ static int qlge_probe(struct pci_dev *pdev,
qdev = netdev_priv(ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
- ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_TSO | NETIF_F_TSO_ECN |
- NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
- ndev->features = ndev->hw_features |
- NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+ ndev->hw_features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_RXCSUM;
+ ndev->features = ndev->hw_features;
ndev->vlan_features = ndev->hw_features;
+ /* vlan gets same features (except the vlan offloads and vlan filter) */
+ ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX);
if (test_bit(QL_DMA64, &qdev->flags))
ndev->features |= NETIF_F_HIGHDMA;
@@ -4694,7 +4770,7 @@ static int qlge_probe(struct pci_dev *pdev,
ndev->irq = pdev->irq;
ndev->netdev_ops = &qlge_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
+ ndev->ethtool_ops = &qlge_ethtool_ops;
ndev->watchdog_timeo = 10 * HZ;
err = register_netdev(ndev);
@@ -4702,6 +4778,7 @@ static int qlge_probe(struct pci_dev *pdev,
dev_err(&pdev->dev, "net device registration failed.\n");
ql_release_all(pdev);
pci_disable_device(pdev);
+ free_netdev(ndev);
return err;
}
/* Start up the timer to trigger EEH if
@@ -4930,15 +5007,4 @@ static struct pci_driver qlge_driver = {
.err_handler = &qlge_err_handler
};
-static int __init qlge_init_module(void)
-{
- return pci_register_driver(&qlge_driver);
-}
-
-static void __exit qlge_exit(void)
-{
- pci_unregister_driver(&qlge_driver);
-}
-
-module_init(qlge_init_module);
-module_exit(qlge_exit);
+module_pci_driver(qlge_driver);
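
Most of the qlge_main.c hunks above track the tree-wide conversion to the protocol-aware VLAN acceleration API: the NETIF_F_HW_VLAN_CTAG_* feature flags replace NETIF_F_HW_VLAN_*, the .ndo_vlan_rx_add_vid/.ndo_vlan_rx_kill_vid callbacks gain a __be16 protocol argument, and __vlan_hwaccel_put_tag() now takes the protocol explicitly. A minimal sketch of the resulting receive-side pattern (illustrative only; the helper name and parameters are invented for this note and are not part of the patch):

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

/* Illustrative sketch: attach a hardware-stripped 802.1Q tag before handing
 * the skb to GRO.  The protocol is now passed explicitly, in network byte
 * order, and the tag is only attached when CTAG rx offload is enabled. */
static void example_deliver_rx(struct net_device *ndev, struct napi_struct *napi,
			       struct sk_buff *skb, u16 vlan_tci, bool vlan_valid)
{
	if (vlan_valid && (ndev->features & NETIF_F_HW_VLAN_CTAG_RX))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);

	napi_gro_receive(napi, skb);
}
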
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
index ff2bf8a4e24..7ad146080c3 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
@@ -1274,7 +1274,7 @@ void ql_mpi_reset_work(struct work_struct *work)
return;
}
- if (!ql_core_dump(qdev, qdev->mpi_coredump)) {
+ if (qdev->mpi_coredump && !ql_core_dump(qdev, qdev->mpi_coredump)) {
netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
qdev->core_is_dumped = 1;
queue_delayed_work(qdev->workqueue,
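
One further API change worth calling out from the qlge_main.c portion of this diff: ql_enable_msix() drops the open-coded pci_enable_msix() retry loop in favour of pci_enable_msix_range(), which returns either the number of vectors actually granted (between minvec and maxvec) or a negative errno, so the old err == 0 success case disappears. A minimal sketch of the calling pattern, assuming caller-initialized msix_entry slots (every name other than the PCI API is hypothetical):

#include <linux/pci.h>

/* Illustrative sketch: request between 1 and 'wanted' MSI-X vectors.
 * entries[i].entry must already be filled in by the caller. */
static int example_enable_msix(struct pci_dev *pdev,
			       struct msix_entry *entries, int wanted)
{
	int nvec = pci_enable_msix_range(pdev, entries, 1, wanted);

	/* Negative: no vectors at all, caller falls back to MSI or INTx.
	 * Positive: the number of vectors actually granted (may be < wanted). */
	return nvec;
}
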