Diffstat (limited to 'drivers/net/qlge')
-rw-r--r--  drivers/net/qlge/qlge.h          201
-rw-r--r--  drivers/net/qlge/qlge_dbg.c      180
-rw-r--r--  drivers/net/qlge/qlge_ethtool.c  178
-rw-r--r--  drivers/net/qlge/qlge_main.c     392
-rw-r--r--  drivers/net/qlge/qlge_mpi.c      154
5 files changed, 975 insertions(+), 130 deletions(-)
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index c2383adcd52..73c7fd2badc 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -54,8 +54,10 @@
#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
-#define SMALL_BUFFER_SIZE 256
-#define LARGE_BUFFER_SIZE PAGE_SIZE
+#define SMALL_BUFFER_SIZE 512
+#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
+#define LARGE_BUFFER_MAX_SIZE 8192
+#define LARGE_BUFFER_MIN_SIZE 2048
#define MAX_SPLIT_SIZE 1023
#define QLGE_SB_PAD 32
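[Annotation] The new small-buffer constants interact with the refill path in
qlge_main.c below: the skb is allocated at SMALL_BUFFER_SIZE, QLGE_SB_PAD
bytes are reserved at its head, and only SMALL_BUF_MAP_SIZE bytes are
DMA-mapped. A minimal standalone check of that arithmetic (illustrative
only, not driver code):

#include <assert.h>

#define SMALL_BUFFER_SIZE	512
#define SMALL_BUF_MAP_SIZE	(SMALL_BUFFER_SIZE / 2)
#define QLGE_SB_PAD		32

int main(void)
{
	/* netdev_alloc_skb() gets SMALL_BUFFER_SIZE bytes; QLGE_SB_PAD is
	 * reserved at the head and only SMALL_BUF_MAP_SIZE is DMA-mapped,
	 * so the mapped region must fit inside the allocation. */
	assert(QLGE_SB_PAD + SMALL_BUF_MAP_SIZE <= SMALL_BUFFER_SIZE);
	return 0;
}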
@@ -795,6 +797,7 @@ enum {
MB_WOL_BCAST = (1 << 5),
MB_WOL_LINK_UP = (1 << 6),
MB_WOL_LINK_DOWN = (1 << 7),
+	MB_WOL_MODE_ON = (1 << 16),	/* Wake On Lan Mode on */
MB_CMD_SET_WOL_FLTR = 0x00000111, /* Wake On Lan Filter */
MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */
MB_CMD_SET_WOL_MAGIC = 0x00000113, /* Wake On Lan Magic Packet */
@@ -804,6 +807,9 @@ enum {
MB_CMD_SET_PORT_CFG = 0x00000122,
MB_CMD_GET_PORT_CFG = 0x00000123,
MB_CMD_GET_LINK_STS = 0x00000124,
+ MB_CMD_SET_LED_CFG = 0x00000125, /* Set LED Configuration Register */
+ QL_LED_BLINK = 0x03e803e8,
+ MB_CMD_GET_LED_CFG = 0x00000126, /* Get LED Configuration Register */
MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */
MB_SET_MPI_TFK_STOP = (1 << 0),
MB_SET_MPI_TFK_RESUME = (1 << 1),
@@ -1201,9 +1207,17 @@ struct tx_ring_desc {
struct tx_ring_desc *next;
};
+struct page_chunk {
+ struct page *page; /* master page */
+ char *va; /* virt addr for this chunk */
+ u64 map; /* mapping for master */
+ unsigned int offset; /* offset for this chunk */
+ unsigned int last_flag; /* flag set for last chunk in page */
+};
+
struct bq_desc {
union {
- struct page *lbq_page;
+ struct page_chunk pg_chunk;
struct sk_buff *skb;
} p;
__le64 *addr;
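[Annotation] The page_chunk scheme replaces one-page-per-buffer with a single
high-order "master" page carved into lbq_buf_size chunks: offset tracks each
chunk's position and last_flag marks the chunk whose consumption should
release the master mapping. A standalone sketch of that carving (sizes are
hypothetical examples; not driver code):

#include <stdio.h>

#define BLOCK_SIZE 8192	/* e.g. PAGE_SIZE << lbq_buf_order */
#define CHUNK_SIZE 2048	/* e.g. rx_ring->lbq_buf_size */

int main(void)
{
	unsigned int offset;

	for (offset = 0; offset < BLOCK_SIZE; offset += CHUNK_SIZE) {
		int last_flag = (offset + CHUNK_SIZE == BLOCK_SIZE);

		printf("chunk @ offset %4u%s\n", offset,
		       last_flag ? "  <- last chunk, unmap master here" : "");
	}
	return 0;
}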
@@ -1272,6 +1286,7 @@ struct rx_ring {
dma_addr_t lbq_base_dma;
void *lbq_base_indirect;
dma_addr_t lbq_base_indirect_dma;
+ struct page_chunk pg_chunk; /* current page for chunks */
struct bq_desc *lbq; /* array of control blocks */
void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */
u32 lbq_prod_idx; /* current sw prod idx */
@@ -1363,6 +1378,174 @@ struct nic_stats {
u64 rx_1024_to_1518_pkts;
u64 rx_1519_to_max_pkts;
u64 rx_len_err_pkts;
+	/*
+	 * These stats come from offsets 500h to 5C8h
+	 * in the XGMAC register space.
+	 */
+ u64 tx_cbfc_pause_frames0;
+ u64 tx_cbfc_pause_frames1;
+ u64 tx_cbfc_pause_frames2;
+ u64 tx_cbfc_pause_frames3;
+ u64 tx_cbfc_pause_frames4;
+ u64 tx_cbfc_pause_frames5;
+ u64 tx_cbfc_pause_frames6;
+ u64 tx_cbfc_pause_frames7;
+ u64 rx_cbfc_pause_frames0;
+ u64 rx_cbfc_pause_frames1;
+ u64 rx_cbfc_pause_frames2;
+ u64 rx_cbfc_pause_frames3;
+ u64 rx_cbfc_pause_frames4;
+ u64 rx_cbfc_pause_frames5;
+ u64 rx_cbfc_pause_frames6;
+ u64 rx_cbfc_pause_frames7;
+ u64 rx_nic_fifo_drop;
+};
+
+/* Address/Length pairs for the coredump. */
+enum {
+ MPI_CORE_REGS_ADDR = 0x00030000,
+ MPI_CORE_REGS_CNT = 127,
+ MPI_CORE_SH_REGS_CNT = 16,
+ TEST_REGS_ADDR = 0x00001000,
+ TEST_REGS_CNT = 23,
+ RMII_REGS_ADDR = 0x00001040,
+ RMII_REGS_CNT = 64,
+ FCMAC1_REGS_ADDR = 0x00001080,
+ FCMAC2_REGS_ADDR = 0x000010c0,
+ FCMAC_REGS_CNT = 64,
+ FC1_MBX_REGS_ADDR = 0x00001100,
+ FC2_MBX_REGS_ADDR = 0x00001240,
+ FC_MBX_REGS_CNT = 64,
+ IDE_REGS_ADDR = 0x00001140,
+ IDE_REGS_CNT = 64,
+ NIC1_MBX_REGS_ADDR = 0x00001180,
+ NIC2_MBX_REGS_ADDR = 0x00001280,
+ NIC_MBX_REGS_CNT = 64,
+ SMBUS_REGS_ADDR = 0x00001200,
+ SMBUS_REGS_CNT = 64,
+ I2C_REGS_ADDR = 0x00001fc0,
+ I2C_REGS_CNT = 64,
+ MEMC_REGS_ADDR = 0x00003000,
+ MEMC_REGS_CNT = 256,
+ PBUS_REGS_ADDR = 0x00007c00,
+ PBUS_REGS_CNT = 256,
+ MDE_REGS_ADDR = 0x00010000,
+ MDE_REGS_CNT = 6,
+ CODE_RAM_ADDR = 0x00020000,
+ CODE_RAM_CNT = 0x2000,
+ MEMC_RAM_ADDR = 0x00100000,
+ MEMC_RAM_CNT = 0x2000,
+};
+
+#define MPI_COREDUMP_COOKIE 0x5555aaaa
+struct mpi_coredump_global_header {
+ u32 cookie;
+ u8 idString[16];
+ u32 timeLo;
+ u32 timeHi;
+ u32 imageSize;
+ u32 headerSize;
+ u8 info[220];
+};
+
+struct mpi_coredump_segment_header {
+ u32 cookie;
+ u32 segNum;
+ u32 segSize;
+ u32 extra;
+ u8 description[16];
+};
+
+/* Reg dump segment numbers. */
+enum {
+ CORE_SEG_NUM = 1,
+ TEST_LOGIC_SEG_NUM = 2,
+ RMII_SEG_NUM = 3,
+ FCMAC1_SEG_NUM = 4,
+ FCMAC2_SEG_NUM = 5,
+ FC1_MBOX_SEG_NUM = 6,
+ IDE_SEG_NUM = 7,
+ NIC1_MBOX_SEG_NUM = 8,
+ SMBUS_SEG_NUM = 9,
+ FC2_MBOX_SEG_NUM = 10,
+ NIC2_MBOX_SEG_NUM = 11,
+ I2C_SEG_NUM = 12,
+ MEMC_SEG_NUM = 13,
+ PBUS_SEG_NUM = 14,
+ MDE_SEG_NUM = 15,
+ NIC1_CONTROL_SEG_NUM = 16,
+ NIC2_CONTROL_SEG_NUM = 17,
+ NIC1_XGMAC_SEG_NUM = 18,
+ NIC2_XGMAC_SEG_NUM = 19,
+ WCS_RAM_SEG_NUM = 20,
+ MEMC_RAM_SEG_NUM = 21,
+ XAUI_AN_SEG_NUM = 22,
+ XAUI_HSS_PCS_SEG_NUM = 23,
+ XFI_AN_SEG_NUM = 24,
+ XFI_TRAIN_SEG_NUM = 25,
+ XFI_HSS_PCS_SEG_NUM = 26,
+ XFI_HSS_TX_SEG_NUM = 27,
+ XFI_HSS_RX_SEG_NUM = 28,
+ XFI_HSS_PLL_SEG_NUM = 29,
+ MISC_NIC_INFO_SEG_NUM = 30,
+ INTR_STATES_SEG_NUM = 31,
+ CAM_ENTRIES_SEG_NUM = 32,
+ ROUTING_WORDS_SEG_NUM = 33,
+ ETS_SEG_NUM = 34,
+ PROBE_DUMP_SEG_NUM = 35,
+ ROUTING_INDEX_SEG_NUM = 36,
+ MAC_PROTOCOL_SEG_NUM = 37,
+ XAUI2_AN_SEG_NUM = 38,
+ XAUI2_HSS_PCS_SEG_NUM = 39,
+ XFI2_AN_SEG_NUM = 40,
+ XFI2_TRAIN_SEG_NUM = 41,
+ XFI2_HSS_PCS_SEG_NUM = 42,
+ XFI2_HSS_TX_SEG_NUM = 43,
+ XFI2_HSS_RX_SEG_NUM = 44,
+ XFI2_HSS_PLL_SEG_NUM = 45,
+	SEM_REGS_SEG_NUM = 50
+};
+
+struct ql_nic_misc {
+ u32 rx_ring_count;
+ u32 tx_ring_count;
+ u32 intr_count;
+ u32 function;
+};
+
+struct ql_reg_dump {
+
+ /* segment 0 */
+ struct mpi_coredump_global_header mpi_global_header;
+
+ /* segment 16 */
+ struct mpi_coredump_segment_header nic_regs_seg_hdr;
+ u32 nic_regs[64];
+
+ /* segment 30 */
+ struct mpi_coredump_segment_header misc_nic_seg_hdr;
+ struct ql_nic_misc misc_nic_info;
+
+ /* segment 31 */
+ /* one interrupt state for each CQ */
+ struct mpi_coredump_segment_header intr_states_seg_hdr;
+ u32 intr_states[MAX_CPUS];
+
+ /* segment 32 */
+ /* 3 cam words each for 16 unicast,
+ * 2 cam words for each of 32 multicast.
+ */
+ struct mpi_coredump_segment_header cam_entries_seg_hdr;
+ u32 cam_entries[(16 * 3) + (32 * 3)];
+
+ /* segment 33 */
+ struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
+ u32 nic_routing_words[16];
+
+ /* segment 34 */
+ struct mpi_coredump_segment_header ets_seg_hdr;
+ u32 ets[8+2];
};
/*
@@ -1505,6 +1688,7 @@ struct ql_adapter {
struct rx_ring rx_ring[MAX_RX_RINGS];
struct tx_ring tx_ring[MAX_TX_RINGS];
+ unsigned int lbq_buf_order;
int rx_csum;
u32 default_rx_queue;
@@ -1519,11 +1703,11 @@ struct ql_adapter {
u32 port_init;
u32 link_status;
u32 link_config;
+ u32 led_config;
u32 max_frame_size;
union flash_params flash;
- struct net_device_stats stats;
struct workqueue_struct *workqueue;
struct delayed_work asic_reset_work;
struct delayed_work mpi_reset_work;
@@ -1611,10 +1795,19 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev);
int ql_cam_route_initialize(struct ql_adapter *qdev);
int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
int ql_mb_about_fw(struct ql_adapter *qdev);
+int ql_wol(struct ql_adapter *qdev);
+int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
+int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
+int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
+int ql_mb_get_led_cfg(struct ql_adapter *qdev);
void ql_link_on(struct ql_adapter *qdev);
void ql_link_off(struct ql_adapter *qdev);
int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
+int ql_mb_get_port_cfg(struct ql_adapter *qdev);
+int ql_mb_set_port_cfg(struct ql_adapter *qdev);
int ql_wait_fifo_empty(struct ql_adapter *qdev);
+void ql_gen_reg_dump(struct ql_adapter *qdev,
+ struct ql_reg_dump *mpi_coredump);
#if 1
#define QL_ALL_DUMP
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index aa88cb3f41c..9f58c471076 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1,5 +1,185 @@
#include "qlge.h"
+
+static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
+{
+ int status = 0;
+ int i;
+
+ for (i = 0; i < 8; i++, buf++) {
+ ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000);
+ *buf = ql_read32(qdev, NIC_ETS);
+ }
+
+ for (i = 0; i < 2; i++, buf++) {
+ ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000);
+ *buf = ql_read32(qdev, CNA_ETS);
+ }
+
+ return status;
+}
+
+static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
+{
+ int i;
+
+ for (i = 0; i < qdev->rx_ring_count; i++, buf++) {
+ ql_write32(qdev, INTR_EN,
+ qdev->intr_context[i].intr_read_mask);
+ *buf = ql_read32(qdev, INTR_EN);
+ }
+}
+
+static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
+{
+ int i, status;
+ u32 value[3];
+
+ status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status)
+ return status;
+
+ for (i = 0; i < 16; i++) {
+ status = ql_get_mac_addr_reg(qdev,
+ MAC_ADDR_TYPE_CAM_MAC, i, value);
+ if (status) {
+ QPRINTK(qdev, DRV, ERR,
+ "Failed read of mac index register.\n");
+ goto err;
+ }
+ *buf++ = value[0]; /* lower MAC address */
+ *buf++ = value[1]; /* upper MAC address */
+ *buf++ = value[2]; /* output */
+ }
+ for (i = 0; i < 32; i++) {
+ status = ql_get_mac_addr_reg(qdev,
+ MAC_ADDR_TYPE_MULTI_MAC, i, value);
+ if (status) {
+ QPRINTK(qdev, DRV, ERR,
+ "Failed read of mac index register.\n");
+ goto err;
+ }
+ *buf++ = value[0]; /* lower Mcast address */
+ *buf++ = value[1]; /* upper Mcast address */
+ }
+err:
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+ return status;
+}
+
+static int ql_get_routing_entries(struct ql_adapter *qdev, u32 *buf)
+{
+ int status;
+ u32 value, i;
+
+ status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+ if (status)
+ return status;
+
+ for (i = 0; i < 16; i++) {
+ status = ql_get_routing_reg(qdev, i, &value);
+ if (status) {
+ QPRINTK(qdev, DRV, ERR,
+ "Failed read of routing index register.\n");
+ goto err;
+ } else {
+ *buf++ = value;
+ }
+ }
+err:
+ ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+ return status;
+}
+
+/* Create a coredump segment header */
+static void ql_build_coredump_seg_header(
+ struct mpi_coredump_segment_header *seg_hdr,
+ u32 seg_number, u32 seg_size, u8 *desc)
+{
+ memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
+ seg_hdr->cookie = MPI_COREDUMP_COOKIE;
+ seg_hdr->segNum = seg_number;
+ seg_hdr->segSize = seg_size;
+ memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
+}
+
+void ql_gen_reg_dump(struct ql_adapter *qdev,
+ struct ql_reg_dump *mpi_coredump)
+{
+ int i, status;
+
+ memset(&(mpi_coredump->mpi_global_header), 0,
+ sizeof(struct mpi_coredump_global_header));
+ mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
+ mpi_coredump->mpi_global_header.headerSize =
+ sizeof(struct mpi_coredump_global_header);
+ mpi_coredump->mpi_global_header.imageSize =
+ sizeof(struct ql_reg_dump);
+	memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+		sizeof("MPI Coredump"));
+
+	/* Segment 30 */
+ ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
+ MISC_NIC_INFO_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->misc_nic_info),
+ "MISC NIC INFO");
+ mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
+ mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
+ mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
+ mpi_coredump->misc_nic_info.function = qdev->func;
+
+ /* Segment 16, Rev C. Step 18 */
+ ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
+ NIC1_CONTROL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->nic_regs),
+ "NIC Registers");
+ /* Get generic reg dump */
+ for (i = 0; i < 64; i++)
+ mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));
+
+ /* Segment 31 */
+ /* Get indexed register values. */
+ ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
+ INTR_STATES_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->intr_states),
+ "INTR States");
+ ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
+
+ ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
+ CAM_ENTRIES_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->cam_entries),
+ "CAM Entries");
+ status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
+ if (status)
+ return;
+
+ ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
+ ROUTING_WORDS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->nic_routing_words),
+ "Routing Words");
+ status = ql_get_routing_entries(qdev,
+ &mpi_coredump->nic_routing_words[0]);
+ if (status)
+ return;
+
+ /* Segment 34 (Rev C. step 23) */
+ ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
+ ETS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->ets),
+ "ETS Registers");
+ status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
+ if (status)
+ return;
+}
+
#ifdef QL_REG_DUMP
static void ql_dump_intr_states(struct ql_adapter *qdev)
{
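[Annotation] Because ql_build_coredump_seg_header() stores segSize as header
size plus payload size, a dump blob can be walked generically: skip the
256-byte global header, then hop segment to segment by segSize, validating
the cookie at each step. A hedged userspace sketch of such a walker (struct
layout mirrored from qlge.h; the walker itself is illustrative, not driver
code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MPI_COREDUMP_COOKIE 0x5555aaaa

struct seg_hdr {		/* mirrors mpi_coredump_segment_header */
	uint32_t cookie;
	uint32_t segNum;
	uint32_t segSize;	/* header + payload, in bytes */
	uint32_t extra;
	uint8_t  description[16];
};

/* Walk segments in buf; the global header is assumed already skipped. */
static void walk_segments(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (off + sizeof(struct seg_hdr) <= len) {
		struct seg_hdr hdr;

		memcpy(&hdr, buf + off, sizeof(hdr));
		if (hdr.cookie != MPI_COREDUMP_COOKIE || hdr.segSize == 0)
			break;
		printf("segment %u (%.*s), %u bytes\n", hdr.segNum,
		       (int)sizeof(hdr.description),
		       (const char *)hdr.description, hdr.segSize);
		off += hdr.segSize;
	}
}

int main(void)
{
	/* One ETS segment: header + 10 u32 payload words, as in
	 * ql_reg_dump above. */
	struct seg_hdr hdr = { MPI_COREDUMP_COOKIE, 34, sizeof(hdr) + 40,
			       0, "ETS Registers" };
	uint8_t buf[sizeof(hdr) + 40] = {0};

	memcpy(buf, &hdr, sizeof(hdr));
	walk_segments(buf, sizeof(buf));
	return 0;
}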
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 52073946bce..62c4af05780 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -132,6 +132,41 @@ static void ql_update_stats(struct ql_adapter *qdev)
iter++;
}
+ /*
+ * Get Per-priority TX pause frame counter statistics.
+ */
+ for (i = 0x500; i < 0x540; i += 8) {
+ if (ql_read_xgmac_reg64(qdev, i, &data)) {
+ QPRINTK(qdev, DRV, ERR,
+ "Error reading status register 0x%.04x.\n", i);
+ goto end;
+ } else
+ *iter = data;
+ iter++;
+ }
+
+ /*
+ * Get Per-priority RX pause frame counter statistics.
+ */
+ for (i = 0x568; i < 0x5a8; i += 8) {
+ if (ql_read_xgmac_reg64(qdev, i, &data)) {
+ QPRINTK(qdev, DRV, ERR,
+ "Error reading status register 0x%.04x.\n", i);
+ goto end;
+ } else
+ *iter = data;
+ iter++;
+ }
+
+ /*
+ * Get RX NIC FIFO DROP statistics.
+ */
+ if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) {
+ QPRINTK(qdev, DRV, ERR,
+ "Error reading status register 0x%.04x.\n", i);
+ goto end;
+ } else
+ *iter = data;
end:
ql_sem_unlock(qdev, qdev->xg_sem_mask);
quit:
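[Annotation] Each CBFC counter is a 64-bit XGMAC register, so consecutive
priorities sit 8 bytes apart: TX priorities 0-7 at 0x500-0x538, RX
priorities 0-7 at 0x568-0x5a0, and the NIC FIFO drop counter at 0x5b8. A
throwaway loop confirming the offset arithmetic (illustrative; the driver
reads these through ql_read_xgmac_reg64()):

#include <stdio.h>

int main(void)
{
	unsigned int reg, prio;

	for (prio = 0, reg = 0x500; reg < 0x540; reg += 8, prio++)
		printf("tx_cbfc_pause_frames%u @ 0x%03x\n", prio, reg);
	for (prio = 0, reg = 0x568; reg < 0x5a8; reg += 8, prio++)
		printf("rx_cbfc_pause_frames%u @ 0x%03x\n", prio, reg);
	printf("rx_nic_fifo_drop         @ 0x5b8\n");
	return 0;
}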
@@ -185,6 +220,23 @@ static char ql_stats_str_arr[][ETH_GSTRING_LEN] = {
{"rx_1024_to_1518_pkts"},
{"rx_1519_to_max_pkts"},
{"rx_len_err_pkts"},
+ {"tx_cbfc_pause_frames0"},
+ {"tx_cbfc_pause_frames1"},
+ {"tx_cbfc_pause_frames2"},
+ {"tx_cbfc_pause_frames3"},
+ {"tx_cbfc_pause_frames4"},
+ {"tx_cbfc_pause_frames5"},
+ {"tx_cbfc_pause_frames6"},
+ {"tx_cbfc_pause_frames7"},
+ {"rx_cbfc_pause_frames0"},
+ {"rx_cbfc_pause_frames1"},
+ {"rx_cbfc_pause_frames2"},
+ {"rx_cbfc_pause_frames3"},
+ {"rx_cbfc_pause_frames4"},
+ {"rx_cbfc_pause_frames5"},
+ {"rx_cbfc_pause_frames6"},
+ {"rx_cbfc_pause_frames7"},
+ {"rx_nic_fifo_drop"},
};
static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -257,6 +309,23 @@ ql_get_ethtool_stats(struct net_device *ndev,
*data++ = s->rx_1024_to_1518_pkts;
*data++ = s->rx_1519_to_max_pkts;
*data++ = s->rx_len_err_pkts;
+ *data++ = s->tx_cbfc_pause_frames0;
+ *data++ = s->tx_cbfc_pause_frames1;
+ *data++ = s->tx_cbfc_pause_frames2;
+ *data++ = s->tx_cbfc_pause_frames3;
+ *data++ = s->tx_cbfc_pause_frames4;
+ *data++ = s->tx_cbfc_pause_frames5;
+ *data++ = s->tx_cbfc_pause_frames6;
+ *data++ = s->tx_cbfc_pause_frames7;
+ *data++ = s->rx_cbfc_pause_frames0;
+ *data++ = s->rx_cbfc_pause_frames1;
+ *data++ = s->rx_cbfc_pause_frames2;
+ *data++ = s->rx_cbfc_pause_frames3;
+ *data++ = s->rx_cbfc_pause_frames4;
+ *data++ = s->rx_cbfc_pause_frames5;
+ *data++ = s->rx_cbfc_pause_frames6;
+ *data++ = s->rx_cbfc_pause_frames7;
+ *data++ = s->rx_nic_fifo_drop;
}
static int ql_get_settings(struct net_device *ndev,
@@ -302,6 +371,77 @@ static void ql_get_drvinfo(struct net_device *ndev,
drvinfo->eedump_len = 0;
}
+static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ /* What we support. */
+ wol->supported = WAKE_MAGIC;
+ /* What we've currently got set. */
+ wol->wolopts = qdev->wol;
+}
+
+static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ int status;
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
+ qdev->wol = wol->wolopts;
+
+ QPRINTK(qdev, DRV, INFO, "Set wol option 0x%x on %s\n",
+ qdev->wol, ndev->name);
+	if (!qdev->wol) {
+		u32 wol_mode = 0;
+		status = ql_mb_wol_mode(qdev, wol_mode);
+		QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
+			(status == 0) ? "cleared successfully" : "clear failed",
+			wol_mode, qdev->ndev->name);
+	}
+
+ return 0;
+}
+
+static int ql_phys_id(struct net_device *ndev, u32 data)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ u32 led_reg, i;
+ int status;
+
+ /* Save the current LED settings */
+ status = ql_mb_get_led_cfg(qdev);
+ if (status)
+ return status;
+ led_reg = qdev->led_config;
+
+ /* Start blinking the led */
+ if (!data || data > 300)
+ data = 300;
+
+ for (i = 0; i < (data * 10); i++)
+ ql_mb_set_led_cfg(qdev, QL_LED_BLINK);
+
+ /* Restore LED settings */
+ status = ql_mb_set_led_cfg(qdev, led_reg);
+ if (status)
+ return status;
+
+ return 0;
+}
+
+static int ql_get_regs_len(struct net_device *ndev)
+{
+ return sizeof(struct ql_reg_dump);
+}
+
+static void ql_get_regs(struct net_device *ndev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ ql_gen_reg_dump(qdev, p);
+}
+
static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
struct ql_adapter *qdev = netdev_priv(dev);
@@ -355,6 +495,37 @@ static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
return ql_update_ring_coalescing(qdev);
}
+static void ql_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ql_adapter *qdev = netdev_priv(netdev);
+
+ ql_mb_get_port_cfg(qdev);
+ if (qdev->link_config & CFG_PAUSE_STD) {
+ pause->rx_pause = 1;
+ pause->tx_pause = 1;
+ }
+}
+
+static int ql_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ql_adapter *qdev = netdev_priv(netdev);
+ int status = 0;
+
+ if ((pause->rx_pause) && (pause->tx_pause))
+ qdev->link_config |= CFG_PAUSE_STD;
+ else if (!pause->rx_pause && !pause->tx_pause)
+ qdev->link_config &= ~CFG_PAUSE_STD;
+ else
+ return -EINVAL;
+
+	status = ql_mb_set_port_cfg(qdev);
+	return status;
+}
+
static u32 ql_get_rx_csum(struct net_device *netdev)
{
struct ql_adapter *qdev = netdev_priv(netdev);
@@ -396,9 +567,16 @@ static void ql_set_msglevel(struct net_device *ndev, u32 value)
const struct ethtool_ops qlge_ethtool_ops = {
.get_settings = ql_get_settings,
.get_drvinfo = ql_get_drvinfo,
+ .get_wol = ql_get_wol,
+ .set_wol = ql_set_wol,
+ .get_regs_len = ql_get_regs_len,
+ .get_regs = ql_get_regs,
.get_msglevel = ql_get_msglevel,
.set_msglevel = ql_set_msglevel,
.get_link = ethtool_op_get_link,
+ .phys_id = ql_phys_id,
+ .get_pauseparam = ql_get_pauseparam,
+ .set_pauseparam = ql_set_pauseparam,
.get_rx_csum = ql_get_rx_csum,
.set_rx_csum = ql_set_rx_csum,
.get_tx_csum = ethtool_op_get_tx_csum,
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index cea7531f4f4..42ad811ec31 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -1025,6 +1025,11 @@ end:
return status;
}
+static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
+{
+ return PAGE_SIZE << qdev->lbq_buf_order;
+}
+
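[Annotation] ql_lbq_block_size() ties the master allocation to
lbq_buf_order, which ql_configure_rings() later derives with get_order(). A
quick worked example of that round trip (assumes 4 KiB pages; standalone,
not driver code, with a userspace re-implementation of get_order()'s
contract):

#include <stdio.h>

#define PAGE_SHIFT 12			/* assume 4 KiB pages */
#define PAGE_SIZE  (1u << PAGE_SHIFT)

/* Same contract as the kernel's get_order(): smallest order such that
 * PAGE_SIZE << order covers size. */
static unsigned int get_order(unsigned int size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned int sizes[] = { 2048, 8192 };	/* LARGE_BUFFER_MIN/MAX_SIZE */
	unsigned int i;

	for (i = 0; i < 2; i++)
		printf("lbq_buf_len %u -> order %u -> block size %u\n",
		       sizes[i], get_order(sizes[i]),
		       PAGE_SIZE << get_order(sizes[i]));
	return 0;
}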
/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
@@ -1036,6 +1041,28 @@ static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
return lbq_desc;
}
+static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring)
+{
+ struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
+
+ pci_dma_sync_single_for_cpu(qdev->pdev,
+ pci_unmap_addr(lbq_desc, mapaddr),
+ rx_ring->lbq_buf_size,
+ PCI_DMA_FROMDEVICE);
+
+ /* If it's the last chunk of our master page then
+ * we unmap it.
+ */
+ if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
+ == ql_lbq_block_size(qdev))
+ pci_unmap_page(qdev->pdev,
+ lbq_desc->p.pg_chunk.map,
+ ql_lbq_block_size(qdev),
+ PCI_DMA_FROMDEVICE);
+ return lbq_desc;
+}
+
/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
@@ -1063,6 +1090,53 @@ static void ql_write_cq_idx(struct rx_ring *rx_ring)
ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}
+static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
+ struct bq_desc *lbq_desc)
+{
+ if (!rx_ring->pg_chunk.page) {
+ u64 map;
+ rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
+ GFP_ATOMIC,
+ qdev->lbq_buf_order);
+ if (unlikely(!rx_ring->pg_chunk.page)) {
+ QPRINTK(qdev, DRV, ERR,
+ "page allocation failed.\n");
+ return -ENOMEM;
+ }
+ rx_ring->pg_chunk.offset = 0;
+ map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
+ 0, ql_lbq_block_size(qdev),
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(qdev->pdev, map)) {
+ __free_pages(rx_ring->pg_chunk.page,
+ qdev->lbq_buf_order);
+ QPRINTK(qdev, DRV, ERR,
+ "PCI mapping failed.\n");
+ return -ENOMEM;
+ }
+ rx_ring->pg_chunk.map = map;
+ rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
+ }
+
+ /* Copy the current master pg_chunk info
+ * to the current descriptor.
+ */
+ lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
+
+ /* Adjust the master page chunk for next
+ * buffer get.
+ */
+ rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
+ if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
+ rx_ring->pg_chunk.page = NULL;
+ lbq_desc->p.pg_chunk.last_flag = 1;
+ } else {
+ rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
+ get_page(rx_ring->pg_chunk.page);
+ lbq_desc->p.pg_chunk.last_flag = 0;
+ }
+ return 0;
+}
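[Annotation] Note the reference accounting in ql_get_next_chunk():
alloc_pages() hands back one reference, and every chunk except the last
takes an extra get_page(), so when each consumer eventually put_page()s its
chunk the final put frees the master page. A standalone simulation of that
invariant (illustrative only):

#include <assert.h>
#include <stdio.h>

#define CHUNKS_PER_PAGE 4	/* e.g. 8192-byte block / 2048-byte chunks */

int main(void)
{
	int refcount = 1;	/* alloc_pages() returns one reference */
	int i;

	for (i = 0; i < CHUNKS_PER_PAGE; i++)
		if (i != CHUNKS_PER_PAGE - 1)
			refcount++;	/* get_page() on every non-last chunk */

	for (i = 0; i < CHUNKS_PER_PAGE; i++)
		refcount--;		/* put_page() per consumed chunk */

	assert(refcount == 0);		/* master page is freed */
	printf("all %d chunks consumed, page freed\n", CHUNKS_PER_PAGE);
	return 0;
}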
/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
@@ -1072,39 +1146,28 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
u64 map;
int i;
- while (rx_ring->lbq_free_cnt > 16) {
+ while (rx_ring->lbq_free_cnt > 32) {
for (i = 0; i < 16; i++) {
QPRINTK(qdev, RX_STATUS, DEBUG,
"lbq: try cleaning clean_idx = %d.\n",
clean_idx);
lbq_desc = &rx_ring->lbq[clean_idx];
- if (lbq_desc->p.lbq_page == NULL) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "lbq: getting new page for index %d.\n",
- lbq_desc->index);
- lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
- if (lbq_desc->p.lbq_page == NULL) {
- rx_ring->lbq_clean_idx = clean_idx;
- QPRINTK(qdev, RX_STATUS, ERR,
- "Couldn't get a page.\n");
- return;
- }
- map = pci_map_page(qdev->pdev,
- lbq_desc->p.lbq_page,
- 0, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(qdev->pdev, map)) {
- rx_ring->lbq_clean_idx = clean_idx;
- put_page(lbq_desc->p.lbq_page);
- lbq_desc->p.lbq_page = NULL;
- QPRINTK(qdev, RX_STATUS, ERR,
- "PCI mapping failed.\n");
+ if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
+ QPRINTK(qdev, IFUP, ERR,
+ "Could not get a page chunk.\n");
return;
}
+
+ map = lbq_desc->p.pg_chunk.map +
+ lbq_desc->p.pg_chunk.offset;
pci_unmap_addr_set(lbq_desc, mapaddr, map);
- pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
+ pci_unmap_len_set(lbq_desc, maplen,
+ rx_ring->lbq_buf_size);
*lbq_desc->addr = cpu_to_le64(map);
- }
+
+ pci_dma_sync_single_for_device(qdev->pdev, map,
+ rx_ring->lbq_buf_size,
+ PCI_DMA_FROMDEVICE);
clean_idx++;
if (clean_idx == rx_ring->lbq_len)
clean_idx = 0;
@@ -1147,7 +1210,7 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
sbq_desc->index);
sbq_desc->p.skb =
netdev_alloc_skb(qdev->ndev,
- rx_ring->sbq_buf_size);
+ SMALL_BUFFER_SIZE);
if (sbq_desc->p.skb == NULL) {
QPRINTK(qdev, PROBE, ERR,
"Couldn't get an skb.\n");
@@ -1157,8 +1220,8 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
map = pci_map_single(qdev->pdev,
sbq_desc->p.skb->data,
- rx_ring->sbq_buf_size /
- 2, PCI_DMA_FROMDEVICE);
+ rx_ring->sbq_buf_size,
+ PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(qdev->pdev, map)) {
QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
rx_ring->sbq_clean_idx = clean_idx;
@@ -1168,7 +1231,7 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
}
pci_unmap_addr_set(sbq_desc, mapaddr, map);
pci_unmap_len_set(sbq_desc, maplen,
- rx_ring->sbq_buf_size / 2);
+ rx_ring->sbq_buf_size);
*sbq_desc->addr = cpu_to_le64(map);
}
@@ -1480,27 +1543,24 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
* chain it to the header buffer's skb and let
* it rip.
*/
- lbq_desc = ql_get_curr_lbuf(rx_ring);
- pci_unmap_page(qdev->pdev,
- pci_unmap_addr(lbq_desc,
- mapaddr),
- pci_unmap_len(lbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
+ lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
QPRINTK(qdev, RX_STATUS, DEBUG,
- "Chaining page to skb.\n");
- skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
- 0, length);
+ "Chaining page at offset = %d,"
+ "for %d bytes to skb.\n",
+ lbq_desc->p.pg_chunk.offset, length);
+ skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
+ lbq_desc->p.pg_chunk.offset,
+ length);
skb->len += length;
skb->data_len += length;
skb->truesize += length;
- lbq_desc->p.lbq_page = NULL;
} else {
/*
* The headers and data are in a single large buffer. We
* copy it to a new skb and let it go. This can happen with
* jumbo mtu on a non-TCP/UDP frame.
*/
- lbq_desc = ql_get_curr_lbuf(rx_ring);
+ lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
skb = netdev_alloc_skb(qdev->ndev, length);
if (skb == NULL) {
QPRINTK(qdev, PROBE, DEBUG,
@@ -1515,13 +1575,14 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
skb_reserve(skb, NET_IP_ALIGN);
QPRINTK(qdev, RX_STATUS, DEBUG,
"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
- skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
- 0, length);
+ skb_fill_page_desc(skb, 0,
+ lbq_desc->p.pg_chunk.page,
+ lbq_desc->p.pg_chunk.offset,
+ length);
skb->len += length;
skb->data_len += length;
skb->truesize += length;
length -= length;
- lbq_desc->p.lbq_page = NULL;
__pskb_pull_tail(skb,
(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
VLAN_ETH_HLEN : ETH_HLEN);
@@ -1538,8 +1599,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
* frames. If the MTU goes up we could
* eventually be in trouble.
*/
- int size, offset, i = 0;
- __le64 *bq, bq_array[8];
+ int size, i = 0;
sbq_desc = ql_get_curr_sbuf(rx_ring);
pci_unmap_single(qdev->pdev,
pci_unmap_addr(sbq_desc, mapaddr),
@@ -1558,37 +1618,25 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
QPRINTK(qdev, RX_STATUS, DEBUG,
"%d bytes of headers & data in chain of large.\n", length);
skb = sbq_desc->p.skb;
- bq = &bq_array[0];
- memcpy(bq, skb->data, sizeof(bq_array));
sbq_desc->p.skb = NULL;
skb_reserve(skb, NET_IP_ALIGN);
- } else {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "Headers in small, %d bytes of data in chain of large.\n", length);
- bq = (__le64 *)sbq_desc->p.skb->data;
}
while (length > 0) {
- lbq_desc = ql_get_curr_lbuf(rx_ring);
- pci_unmap_page(qdev->pdev,
- pci_unmap_addr(lbq_desc,
- mapaddr),
- pci_unmap_len(lbq_desc,
- maplen),
- PCI_DMA_FROMDEVICE);
- size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
- offset = 0;
+ lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ size = (length < rx_ring->lbq_buf_size) ? length :
+ rx_ring->lbq_buf_size;
QPRINTK(qdev, RX_STATUS, DEBUG,
"Adding page %d to skb for %d bytes.\n",
i, size);
- skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
- offset, size);
+ skb_fill_page_desc(skb, i,
+ lbq_desc->p.pg_chunk.page,
+ lbq_desc->p.pg_chunk.offset,
+ size);
skb->len += size;
skb->data_len += size;
skb->truesize += size;
length -= size;
- lbq_desc->p.lbq_page = NULL;
- bq++;
i++;
}
__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
@@ -1673,8 +1721,8 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
}
}
- qdev->stats.rx_packets++;
- qdev->stats.rx_bytes += skb->len;
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += skb->len;
skb_record_rx_queue(skb, rx_ring->cq_id);
if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
if (qdev->vlgrp &&
@@ -1698,6 +1746,7 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
struct ob_mac_iocb_rsp *mac_rsp)
{
+ struct net_device *ndev = qdev->ndev;
struct tx_ring *tx_ring;
struct tx_ring_desc *tx_ring_desc;
@@ -1705,8 +1754,8 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
tx_ring_desc = &tx_ring->q[mac_rsp->tid];
ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
- qdev->stats.tx_bytes += (tx_ring_desc->skb)->len;
- qdev->stats.tx_packets++;
+ ndev->stats.tx_bytes += (tx_ring_desc->skb)->len;
+ ndev->stats.tx_packets++;
dev_kfree_skb(tx_ring_desc->skb);
tx_ring_desc->skb = NULL;
@@ -2304,20 +2353,29 @@ err:
static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
- int i;
struct bq_desc *lbq_desc;
- for (i = 0; i < rx_ring->lbq_len; i++) {
- lbq_desc = &rx_ring->lbq[i];
- if (lbq_desc->p.lbq_page) {
+	u32 curr_idx, clean_idx;
+
+ curr_idx = rx_ring->lbq_curr_idx;
+ clean_idx = rx_ring->lbq_clean_idx;
+ while (curr_idx != clean_idx) {
+ lbq_desc = &rx_ring->lbq[curr_idx];
+
+ if (lbq_desc->p.pg_chunk.last_flag) {
pci_unmap_page(qdev->pdev,
- pci_unmap_addr(lbq_desc, mapaddr),
- pci_unmap_len(lbq_desc, maplen),
+ lbq_desc->p.pg_chunk.map,
+ ql_lbq_block_size(qdev),
PCI_DMA_FROMDEVICE);
-
- put_page(lbq_desc->p.lbq_page);
- lbq_desc->p.lbq_page = NULL;
+ lbq_desc->p.pg_chunk.last_flag = 0;
}
+
+ put_page(lbq_desc->p.pg_chunk.page);
+ lbq_desc->p.pg_chunk.page = NULL;
+
+ if (++curr_idx == rx_ring->lbq_len)
+ curr_idx = 0;
}
}
@@ -2615,6 +2673,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
/* Set up the shadow registers for this ring. */
rx_ring->prod_idx_sh_reg = shadow_reg;
rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
+ *rx_ring->prod_idx_sh_reg = 0;
shadow_reg += sizeof(u64);
shadow_reg_dma += sizeof(u64);
rx_ring->lbq_base_indirect = shadow_reg;
@@ -2692,7 +2751,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
cqicb->sbq_addr =
cpu_to_le64(rx_ring->sbq_base_indirect_dma);
cqicb->sbq_buf_size =
- cpu_to_le16((u16)(rx_ring->sbq_buf_size/2));
+ cpu_to_le16((u16)(rx_ring->sbq_buf_size));
bq_len = (rx_ring->sbq_len == 65536) ? 0 :
(u16) rx_ring->sbq_len;
cqicb->sbq_len = cpu_to_le16(bq_len);
@@ -3268,7 +3327,7 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
ql_write32(qdev, FSC, mask | value);
ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
- min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));
+ min(SMALL_BUF_MAP_SIZE, MAX_SPLIT_SIZE));
/* Set RX packet routing to use port/pci function on which the
* packet arrived on in addition to usual frame routing.
@@ -3276,6 +3335,22 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
* the same MAC address.
*/
ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
+ /* Reroute all packets to our Interface.
+ * They may have been routed to MPI firmware
+ * due to WOL.
+ */
+ value = ql_read32(qdev, MGMT_RCV_CFG);
+ value &= ~MGMT_RCV_CFG_RM;
+ mask = 0xffff0000;
+
+ /* Sticky reg needs clearing due to WOL. */
+ ql_write32(qdev, MGMT_RCV_CFG, mask);
+ ql_write32(qdev, MGMT_RCV_CFG, mask | value);
+
+	/* Default WOL is enabled on Mezz cards */
+ if (qdev->pdev->subsystem_device == 0x0068 ||
+ qdev->pdev->subsystem_device == 0x0180)
+ qdev->wol = WAKE_MAGIC;
/* Start up the rx queues. */
for (i = 0; i < qdev->rx_ring_count; i++) {
@@ -3310,10 +3385,8 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
/* Initialize the port and set the max framesize. */
status = qdev->nic_ops->port_initialize(qdev);
- if (status) {
- QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
- return status;
- }
+ if (status)
+ QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
/* Set up the MAC address and frame routing filter. */
status = ql_cam_route_initialize(qdev);
@@ -3392,6 +3465,55 @@ static void ql_display_dev_info(struct net_device *ndev)
QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
}
+int ql_wol(struct ql_adapter *qdev)
+{
+ int status = 0;
+ u32 wol = MB_WOL_DISABLE;
+
+ /* The CAM is still intact after a reset, but if we
+ * are doing WOL, then we may need to program the
+ * routing regs. We would also need to issue the mailbox
+ * commands to instruct the MPI what to do per the ethtool
+ * settings.
+ */
+
+ if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
+ WAKE_MCAST | WAKE_BCAST)) {
+ QPRINTK(qdev, IFDOWN, ERR,
+ "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
+ qdev->wol);
+ return -EINVAL;
+ }
+
+ if (qdev->wol & WAKE_MAGIC) {
+ status = ql_mb_wol_set_magic(qdev, 1);
+ if (status) {
+ QPRINTK(qdev, IFDOWN, ERR,
+ "Failed to set magic packet on %s.\n",
+ qdev->ndev->name);
+ return status;
+ } else
+ QPRINTK(qdev, DRV, INFO,
+ "Enabled magic packet successfully on %s.\n",
+ qdev->ndev->name);
+
+ wol |= MB_WOL_MAGIC_PKT;
+ }
+
+ if (qdev->wol) {
+ /* Reroute all packets to Management Interface */
+ ql_write32(qdev, MGMT_RCV_CFG, (MGMT_RCV_CFG_RM |
+ (MGMT_RCV_CFG_RM << 16)));
+ wol |= MB_WOL_MODE_ON;
+ status = ql_mb_wol_mode(qdev, wol);
+ QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
+ (status == 0) ? "Sucessfully set" : "Failed", wol,
+ qdev->ndev->name);
+ }
+
+ return status;
+}
+
static int ql_adapter_down(struct ql_adapter *qdev)
{
int i, status = 0;
@@ -3497,6 +3619,10 @@ static int ql_configure_rings(struct ql_adapter *qdev)
struct rx_ring *rx_ring;
struct tx_ring *tx_ring;
int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
+ unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
+ LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
+
+ qdev->lbq_buf_order = get_order(lbq_buf_len);
/* In a perfect world we have one RSS ring for each CPU
* and each has it's own vector. To do that we ask for
@@ -3544,11 +3670,14 @@ static int ql_configure_rings(struct ql_adapter *qdev)
rx_ring->lbq_len = NUM_LARGE_BUFFERS;
rx_ring->lbq_size =
rx_ring->lbq_len * sizeof(__le64);
- rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
+ rx_ring->lbq_buf_size = (u16)lbq_buf_len;
+ QPRINTK(qdev, IFUP, DEBUG,
+ "lbq_buf_size %d, order = %d\n",
+ rx_ring->lbq_buf_size, qdev->lbq_buf_order);
rx_ring->sbq_len = NUM_SMALL_BUFFERS;
rx_ring->sbq_size =
rx_ring->sbq_len * sizeof(__le64);
- rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
+ rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
rx_ring->type = RX_Q;
} else {
/*
@@ -3594,14 +3723,63 @@ error_up:
return err;
}
+static int ql_change_rx_buffers(struct ql_adapter *qdev)
+{
+ struct rx_ring *rx_ring;
+ int i, status;
+ u32 lbq_buf_len;
+
+	/* Wait for an outstanding reset to complete. */
+ if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
+		int i = 3;
+		while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Waiting for adapter UP...\n");
+			ssleep(1);
+		}
+
+		if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
+			QPRINTK(qdev, IFUP, ERR,
+				"Timed out waiting for adapter UP\n");
+			return -ETIMEDOUT;
+		}
+ }
+
+ status = ql_adapter_down(qdev);
+ if (status)
+ goto error;
+
+ /* Get the new rx buffer size. */
+ lbq_buf_len = (qdev->ndev->mtu > 1500) ?
+ LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
+ qdev->lbq_buf_order = get_order(lbq_buf_len);
+
+ for (i = 0; i < qdev->rss_ring_count; i++) {
+ rx_ring = &qdev->rx_ring[i];
+ /* Set the new size. */
+ rx_ring->lbq_buf_size = lbq_buf_len;
+ }
+
+ status = ql_adapter_up(qdev);
+ if (status)
+ goto error;
+
+ return status;
+error:
+ QPRINTK(qdev, IFUP, ALERT,
+ "Driver up/down cycle failed, closing device.\n");
+ set_bit(QL_ADAPTER_UP, &qdev->flags);
+ dev_close(qdev->ndev);
+ return status;
+}
+
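[Annotation] An MTU change therefore maps onto a buffer-size change:
anything above the standard 1500-byte MTU switches the large-buffer queue
from LARGE_BUFFER_MIN_SIZE to LARGE_BUFFER_MAX_SIZE, which is why
ql_change_rx_buffers() must cycle the adapter down and up to re-post
buffers. A hedged sketch of just that mapping (standalone; constants copied
from qlge.h, the helper name is invented for illustration):

#include <stdio.h>

#define LARGE_BUFFER_MAX_SIZE 8192
#define LARGE_BUFFER_MIN_SIZE 2048

/* Hypothetical helper mirroring the ternary used in ql_configure_rings()
 * and ql_change_rx_buffers(). */
static unsigned int lbq_buf_len_for_mtu(int mtu)
{
	return (mtu > 1500) ? LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
}

int main(void)
{
	int mtus[] = { 1500, 9000 };
	int i;

	for (i = 0; i < 2; i++)
		printf("mtu %d -> lbq_buf_size %u\n",
		       mtus[i], lbq_buf_len_for_mtu(mtus[i]));
	return 0;
}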
static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
struct ql_adapter *qdev = netdev_priv(ndev);
+ int status;
if (ndev->mtu == 1500 && new_mtu == 9000) {
QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
- queue_delayed_work(qdev->workqueue,
- &qdev->mpi_port_cfg_work, 0);
} else if (ndev->mtu == 9000 && new_mtu == 1500) {
QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
} else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
@@ -3609,15 +3787,29 @@ static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
return 0;
} else
return -EINVAL;
+
+ queue_delayed_work(qdev->workqueue,
+ &qdev->mpi_port_cfg_work, 3*HZ);
+
+ if (!netif_running(qdev->ndev)) {
+ ndev->mtu = new_mtu;
+ return 0;
+ }
+
ndev->mtu = new_mtu;
- return 0;
+ status = ql_change_rx_buffers(qdev);
+ if (status) {
+ QPRINTK(qdev, IFUP, ERR,
+ "Changing MTU failed.\n");
+ }
+
+ return status;
}
static struct net_device_stats *qlge_get_stats(struct net_device
*ndev)
{
- struct ql_adapter *qdev = netdev_priv(ndev);
- return &qdev->stats;
+ return &ndev->stats;
}
static void qlge_set_multicast_list(struct net_device *ndev)
@@ -3868,8 +4060,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
struct net_device *ndev, int cards_found)
{
struct ql_adapter *qdev = netdev_priv(ndev);
- int pos, err = 0;
- u16 val16;
+ int err = 0;
memset((void *)qdev, 0, sizeof(*qdev));
err = pci_enable_device(pdev);
@@ -3881,18 +4072,12 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
qdev->ndev = ndev;
qdev->pdev = pdev;
pci_set_drvdata(pdev, ndev);
- pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- if (pos <= 0) {
- dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
- "aborting.\n");
- return pos;
- } else {
- pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
- val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
- val16 |= (PCI_EXP_DEVCTL_CERE |
- PCI_EXP_DEVCTL_NFERE |
- PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
- pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
+
+ /* Set PCIe read request size */
+ err = pcie_set_readrq(pdev, 4096);
+ if (err) {
+ dev_err(&pdev->dev, "Set readrq failed.\n");
+ goto err_out;
}
err = pci_request_regions(pdev, DRV_NAME);
@@ -4191,6 +4376,7 @@ static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
return err;
}
+ ql_wol(qdev);
err = pci_save_state(pdev);
if (err)
return err;
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index bcf13c96f73..bac7b86f212 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -1,25 +1,5 @@
#include "qlge.h"
-static void ql_display_mb_sts(struct ql_adapter *qdev,
- struct mbox_params *mbcp)
-{
- int i;
- static char *err_sts[] = {
- "Command Complete",
- "Command Not Supported",
- "Host Interface Error",
- "Checksum Error",
- "Unused Completion Status",
- "Test Failed",
- "Command Parameter Error"};
-
- QPRINTK(qdev, DRV, DEBUG, "%s.\n",
- err_sts[mbcp->mbox_out[0] & 0x0000000f]);
- for (i = 0; i < mbcp->out_count; i++)
- QPRINTK(qdev, DRV, DEBUG, "mbox_out[%d] = 0x%.08x.\n",
- i, mbcp->mbox_out[i]);
-}
-
int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
int status;
@@ -317,6 +297,7 @@ static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
} else {
QPRINTK(qdev, DRV, ERR, "Firmware Revision = 0x%.08x.\n",
mbcp->mbox_out[1]);
+ qdev->fw_rev_id = mbcp->mbox_out[1];
status = ql_cam_route_initialize(qdev);
if (status)
QPRINTK(qdev, IFUP, ERR,
@@ -446,6 +427,9 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
ql_aen_lost(qdev, mbcp);
break;
+ case AEN_DCBX_CHG:
+ /* Need to support AEN 8110 */
+ break;
default:
QPRINTK(qdev, DRV, ERR,
"Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
@@ -537,7 +521,6 @@ done:
MB_CMD_STS_GOOD) &&
((mbcp->mbox_out[0] & 0x0000f000) !=
MB_CMD_STS_INTRMDT)) {
- ql_display_mb_sts(qdev, mbcp);
status = -EIO;
}
end:
@@ -655,7 +638,7 @@ int ql_mb_idc_ack(struct ql_adapter *qdev)
* for the current port.
* Most likely will block.
*/
-static int ql_mb_set_port_cfg(struct ql_adapter *qdev)
+int ql_mb_set_port_cfg(struct ql_adapter *qdev)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
@@ -690,7 +673,7 @@ static int ql_mb_set_port_cfg(struct ql_adapter *qdev)
* for the current port.
* Most likely will block.
*/
-static int ql_mb_get_port_cfg(struct ql_adapter *qdev)
+int ql_mb_get_port_cfg(struct ql_adapter *qdev)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
@@ -720,6 +703,76 @@ static int ql_mb_get_port_cfg(struct ql_adapter *qdev)
return status;
}
+int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 2;
+ mbcp->out_count = 1;
+
+ mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE;
+ mbcp->mbox_in[1] = wol;
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ QPRINTK(qdev, DRV, ERR,
+ "Failed to set WOL mode.\n");
+ status = -EIO;
+ }
+ return status;
+}
+
+int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status;
+ u8 *addr = qdev->ndev->dev_addr;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 8;
+ mbcp->out_count = 1;
+
+ mbcp->mbox_in[0] = MB_CMD_SET_WOL_MAGIC;
+ if (enable_wol) {
+ mbcp->mbox_in[1] = (u32)addr[0];
+ mbcp->mbox_in[2] = (u32)addr[1];
+ mbcp->mbox_in[3] = (u32)addr[2];
+ mbcp->mbox_in[4] = (u32)addr[3];
+ mbcp->mbox_in[5] = (u32)addr[4];
+ mbcp->mbox_in[6] = (u32)addr[5];
+ mbcp->mbox_in[7] = 0;
+ } else {
+ mbcp->mbox_in[1] = 0;
+ mbcp->mbox_in[2] = 1;
+ mbcp->mbox_in[3] = 1;
+ mbcp->mbox_in[4] = 1;
+ mbcp->mbox_in[5] = 1;
+ mbcp->mbox_in[6] = 1;
+ mbcp->mbox_in[7] = 0;
+ }
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ QPRINTK(qdev, DRV, ERR,
+ "Failed to set WOL mode.\n");
+ status = -EIO;
+ }
+ return status;
+}
+
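[Annotation] The magic-packet mailbox command spreads the station MAC across
words 1-6, one octet per 32-bit word, with word 7 cleared; when disabling,
words 2-6 carry the 1s pattern shown above instead. A standalone
illustration of the enable-side encoding (hypothetical MAC; not a driver
call):

#include <stdio.h>

int main(void)
{
	unsigned char addr[6] = { 0x00, 0xc0, 0xdd, 0x01, 0x02, 0x03 };
	unsigned int mbox_in[8] = { 0 };	/* [0] would carry the command */
	int i;

	/* One MAC octet per mailbox word, widened to 32 bits, as in
	 * ql_mb_wol_set_magic(). */
	for (i = 0; i < 6; i++)
		mbox_in[i + 1] = (unsigned int)addr[i];

	for (i = 1; i < 7; i++)
		printf("mbox_in[%d] = 0x%08x\n", i, mbox_in[i]);
	return 0;
}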
/* IDC - Inter Device Communication...
* Some firmware commands require consent of adjacent FCOE
* function. This function waits for the OK, or a
@@ -769,6 +822,61 @@ static int ql_idc_wait(struct ql_adapter *qdev)
return status;
}
+int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 2;
+ mbcp->out_count = 1;
+
+ mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG;
+ mbcp->mbox_in[1] = led_config;
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ QPRINTK(qdev, DRV, ERR,
+ "Failed to set LED Configuration.\n");
+ status = -EIO;
+ }
+
+ return status;
+}
+
+int ql_mb_get_led_cfg(struct ql_adapter *qdev)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 1;
+ mbcp->out_count = 2;
+
+ mbcp->mbox_in[0] = MB_CMD_GET_LED_CFG;
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ QPRINTK(qdev, DRV, ERR,
+ "Failed to get LED Configuration.\n");
+ status = -EIO;
+ } else
+ qdev->led_config = mbcp->mbox_out[1];
+
+ return status;
+}
+
int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
{
struct mbox_params mbc;