author | Scott Feldman <scofeldm@cisco.com> | 2008-09-15 09:17:11 -0700
committer | Jeff Garzik <jgarzik@redhat.com> | 2008-09-18 11:34:53 -0400
commit | 01f2e4ead2c51226ed1283ef6a8388ca6f4cff8f (patch)
tree | b1cc2ef1a191a3bf00f371d5dbc2028e1fee01c5 /drivers/net/enic/enic_main.c
parent | 452c1ce218a68b5dbd626397ecfc65ca89dd3cbb (diff)
enic: add Cisco 10G Ethernet NIC driver
Signed-off-by: Scott Feldman <scofeldm@cisco.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Diffstat (limited to 'drivers/net/enic/enic_main.c')
-rw-r--r-- | drivers/net/enic/enic_main.c | 1949
1 files changed, 1949 insertions, 0 deletions
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
new file mode 100644
index 00000000000..4cf5ec76c99
--- /dev/null
+++ b/drivers/net/enic/enic_main.c
@@ -0,0 +1,1949 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/ethtool.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+
+#include "cq_enet_desc.h"
+#include "vnic_dev.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "enic_res.h"
+#include "enic.h"
+
+#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
+#define ENIC_JUMBO_FIRST_BUF_SIZE	256
+
+/* Supported devices */
+static struct pci_device_id enic_id_table[] = {
+	{ PCI_VDEVICE(CISCO, 0x0043) },
+	{ 0, }	/* end of table */
+};
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, enic_id_table);
+
+struct enic_stat {
+	char name[ETH_GSTRING_LEN];
+	unsigned int offset;
+};
+
+#define ENIC_TX_STAT(stat)	\
+	{ .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
+#define ENIC_RX_STAT(stat)	\
+	{ .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }
+
+static const struct enic_stat enic_tx_stats[] = {
+	ENIC_TX_STAT(tx_frames_ok),
+	ENIC_TX_STAT(tx_unicast_frames_ok),
+	ENIC_TX_STAT(tx_multicast_frames_ok),
+	ENIC_TX_STAT(tx_broadcast_frames_ok),
+	ENIC_TX_STAT(tx_bytes_ok),
+	ENIC_TX_STAT(tx_unicast_bytes_ok),
+	ENIC_TX_STAT(tx_multicast_bytes_ok),
+	ENIC_TX_STAT(tx_broadcast_bytes_ok),
+	ENIC_TX_STAT(tx_drops),
+	ENIC_TX_STAT(tx_errors),
+	ENIC_TX_STAT(tx_tso),
+};
+
+static const struct enic_stat enic_rx_stats[] = {
+	ENIC_RX_STAT(rx_frames_ok),
+	ENIC_RX_STAT(rx_frames_total),
+	ENIC_RX_STAT(rx_unicast_frames_ok),
+	ENIC_RX_STAT(rx_multicast_frames_ok),
+	ENIC_RX_STAT(rx_broadcast_frames_ok),
+	ENIC_RX_STAT(rx_bytes_ok),
+	ENIC_RX_STAT(rx_unicast_bytes_ok),
+	ENIC_RX_STAT(rx_multicast_bytes_ok),
+	ENIC_RX_STAT(rx_broadcast_bytes_ok),
+	ENIC_RX_STAT(rx_drop),
+	ENIC_RX_STAT(rx_no_bufs),
+	ENIC_RX_STAT(rx_errors),
+	ENIC_RX_STAT(rx_rss),
+	ENIC_RX_STAT(rx_crc_errors),
+	ENIC_RX_STAT(rx_frames_64),
+	ENIC_RX_STAT(rx_frames_127),
+	ENIC_RX_STAT(rx_frames_255),
+	ENIC_RX_STAT(rx_frames_511),
+	ENIC_RX_STAT(rx_frames_1023),
+	ENIC_RX_STAT(rx_frames_1518),
+	ENIC_RX_STAT(rx_frames_to_max),
+};
+
+static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
+static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
+
+static int enic_get_settings(struct net_device *netdev,
+	struct ethtool_cmd *ecmd)
+{
+	struct enic *enic = netdev_priv(netdev);
+
+	ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+	ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
+	ecmd->port = PORT_FIBRE;
+	ecmd->transceiver = XCVR_EXTERNAL;
+
+	if (netif_carrier_ok(netdev)) {
+		ecmd->speed = vnic_dev_port_speed(enic->vdev);
+		ecmd->duplex = DUPLEX_FULL;
+	} else {
+		ecmd->speed = -1;
+		ecmd->duplex = -1;
+	}
+
+	ecmd->autoneg = AUTONEG_DISABLE;
+
+	return 0;
+}
+
+static void enic_get_drvinfo(struct net_device *netdev,
+	struct ethtool_drvinfo *drvinfo)
+{
+	struct enic *enic = netdev_priv(netdev);
+	struct vnic_devcmd_fw_info *fw_info;
+
+	spin_lock(&enic->devcmd_lock);
+	vnic_dev_fw_info(enic->vdev, &fw_info);
+	spin_unlock(&enic->devcmd_lock);
+
+	strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+	strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+	strncpy(drvinfo->fw_version, fw_info->fw_version,
+		sizeof(drvinfo->fw_version));
+	strncpy(drvinfo->bus_info, pci_name(enic->pdev),
+		sizeof(drvinfo->bus_info));
+}
+
+static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+	unsigned int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < enic_n_tx_stats; i++) {
+			memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
+			data += ETH_GSTRING_LEN;
+		}
+		for (i = 0; i < enic_n_rx_stats; i++) {
+			memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
+			data += ETH_GSTRING_LEN;
+		}
+		break;
+	}
+}
+
+static int enic_get_stats_count(struct net_device *netdev)
+{
+	return enic_n_tx_stats + enic_n_rx_stats;
+}
+
+static void enic_get_ethtool_stats(struct net_device *netdev,
+	struct ethtool_stats *stats, u64 *data)
+{
+	struct enic *enic = netdev_priv(netdev);
+	struct vnic_stats *vstats;
+	unsigned int i;
+
+	spin_lock(&enic->devcmd_lock);
+	vnic_dev_stats_dump(enic->vdev, &vstats);
+	spin_unlock(&enic->devcmd_lock);
+
+	for (i = 0; i < enic_n_tx_stats; i++)
+		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
+	for (i = 0; i < enic_n_rx_stats; i++)
+		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
+}
+
+static u32 enic_get_rx_csum(struct net_device *netdev)
+{
+	struct enic *enic = netdev_priv(netdev);
+	return enic->csum_rx_enabled;
+}
+
+static int enic_set_rx_csum(struct net_device *netdev, u32 data)
+{
+	struct enic *enic = netdev_priv(netdev);
+
+	enic->csum_rx_enabled =
+		(data && ENIC_SETTING(enic, RXCSUM)) ? 1 : 0;
+
+	return 0;
+}
+
+static int enic_set_tx_csum(struct net_device *netdev, u32 data)
+{
+	struct enic *enic = netdev_priv(netdev);
+
+	if (data && ENIC_SETTING(enic, TXCSUM))
+		netdev->features |= NETIF_F_HW_CSUM;
+	else
+		netdev->features &= ~NETIF_F_HW_CSUM;
+
+	return 0;
+}
+
+static int enic_set_tso(struct net_device *netdev, u32 data)
+{
+	struct enic *enic = netdev_priv(netdev);
+
+	if (data && ENIC_SETTING(enic, TSO))
+		netdev->features |=
+			NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN;
+	else
+		netdev->features &=
+			~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN);
+
+	return 0;
+}
+
+static u32 enic_get_msglevel(struct net_device *netdev)
+{
+	struct enic *enic = netdev_priv(netdev);
+	return enic->msg_enable;
+}
+
+static void enic_set_msglevel(struct net_device *netdev, u32 value)
+{
+	struct enic *enic = netdev_priv(netdev);
+	enic->msg_enable = value;
+}
+
+static struct ethtool_ops enic_ethtool_ops = {
+	.get_settings = enic_get_settings,
+	.get_drvinfo = enic_get_drvinfo,
+	.get_msglevel = enic_get_msglevel,
+	.set_msglevel = enic_set_msglevel,
+	.get_link = ethtool_op_get_link,
+	.get_strings = enic_get_strings,
+	.get_stats_count = enic_get_stats_count,
+	.get_ethtool_stats = enic_get_ethtool_stats,
+	.get_rx_csum = enic_get_rx_csum,
+	.set_rx_csum = enic_set_rx_csum,
+	.get_tx_csum = ethtool_op_get_tx_csum,
+	.set_tx_csum = enic_set_tx_csum,
+	.get_sg = ethtool_op_get_sg,
+	.set_sg = ethtool_op_set_sg,
+	.get_tso = ethtool_op_get_tso,
+	.set_tso = enic_set_tso,
+};
+
+static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
+{
+	struct enic *enic = vnic_dev_priv(wq->vdev);
+
+	if (buf->sop)
+		pci_unmap_single(enic->pdev, buf->dma_addr,
+			buf->len, PCI_DMA_TODEVICE);
+	else
+		pci_unmap_page(enic->pdev, buf->dma_addr,
+			buf->len, PCI_DMA_TODEVICE);
+
+	if (buf->os_buf)
+		dev_kfree_skb_any(buf->os_buf);
+}
+
+static void enic_wq_free_buf(struct vnic_wq *wq,
+	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
+{
+	enic_free_wq_buf(wq, buf);
+}
+
+static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+	u8 type, u16 q_number, u16 completed_index, void *opaque)
+{
+	struct enic *enic = vnic_dev_priv(vdev);
+
+	spin_lock(&enic->wq_lock[q_number]);
+
+	vnic_wq_service(&enic->wq[q_number], cq_desc,
+		completed_index, enic_wq_free_buf,
+		opaque);
+
+	if (netif_queue_stopped(enic->netdev) &&
+	    vnic_wq_desc_avail(&enic->wq[q_number]) >= MAX_SKB_FRAGS + 1)
+		netif_wake_queue(enic->netdev);
+
+	spin_unlock(&enic->wq_lock[q_number]);
+
+	return 0;
+}
+
+static void enic_log_q_error(struct enic *enic)
+{
+	unsigned int i;
+	u32 error_status;
+
+	for (i = 0; i < enic->wq_count; i++) {
+		error_status = vnic_wq_error_status(&enic->wq[i]);
+		if (error_status)
+			printk(KERN_ERR PFX "%s: WQ[%d] error_status %d\n",
+				enic->netdev->name, i, error_status);
+	}
+
+	for (i = 0; i < enic->rq_count; i++) {
+		error_status = vnic_rq_error_status(&enic->rq[i]);
+		if (error_status)
+			printk(KERN_ERR PFX "%s: RQ[%d] error_status %d\n",
+				enic->netdev->name, i, error_status);
+	}
+}
+
+static void enic_link_check(struct enic *enic)
+{
+	int link_status = vnic_dev_link_status(enic->vdev);
+	int carrier_ok = netif_carrier_ok(enic->netdev);
+
+	if (link_status && !carrier_ok) {
+		printk(KERN_INFO PFX "%s: Link UP\n", enic->netdev->name);
+		netif_carrier_on(enic->netdev);
+	} else if (!link_status && carrier_ok) {
+		printk(KERN_INFO PFX "%s: Link DOWN\n", enic->netdev->name);
+		netif_carrier_off(enic->netdev);
+	}
+}
+
+static void enic_mtu_check(struct enic *enic)
+{
+	u32 mtu = vnic_dev_mtu(enic->vdev);
+
+	if (mtu != enic->port_mtu) {
+		if (mtu < enic->netdev->mtu)
+			printk(KERN_WARNING PFX
+				"%s: interface MTU (%d) set higher "
+				"than switch port MTU (%d)\n",
+				enic->netdev->name, enic->netdev->mtu, mtu);
+		enic->port_mtu = mtu;
+	}
+}
+
+static void enic_msglvl_check(struct enic *enic)
+{
+	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);
+
+	if (msg_enable != enic->msg_enable) {
+		printk(KERN_INFO PFX "%s: msg lvl changed from 0x%x to 0x%x\n",
+			enic->netdev->name, enic->msg_enable, msg_enable);
+		enic->msg_enable = msg_enable;
+	}
+}
+
+static void enic_notify_check(struct enic *enic)
+{
+	enic_msglvl_check(enic);
+	enic_mtu_check(enic);
+	enic_link_check(enic);
+}
+
+#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))
+
+static irqreturn_t enic_isr_legacy(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct enic *enic = netdev_priv(netdev);
+	u32 pba;
+
+	vnic_intr_mask(&enic->intr[ENIC_INTX_WQ_RQ]);
+
+	pba = vnic_intr_legacy_pba(enic->legacy_pba);
+	if (!pba) {
+		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
+		return IRQ_NONE;	/* not our interrupt */
+	}
+
+	if (ENIC_TEST_INTR(pba, ENIC_INTX_NOTIFY))
+		enic_notify_check(enic);
+
+	if (ENIC_TEST_INTR(pba, ENIC_INTX_ERR)) {
+		enic_log_q_error(enic);
+		/* schedule recovery from WQ/RQ error */
+		schedule_work(&enic->reset);
+		return IRQ_HANDLED;
+	}
+
+	if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) {
+		if (netif_rx_schedule_prep(netdev, &enic->napi))
+			__netif_rx_schedule(netdev, &enic->napi);
+	} else {
+		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t enic_isr_msi(int irq, void *data)
+{
+	struct enic *enic = data;
+
+	/* With MSI, there is no sharing of interrupts, so this is
+	 * our interrupt and there is no need to ack it. The device
+	 * is not providing per-vector masking, so the OS will not
+	 * write to PCI config space to mask/unmask the interrupt.
+	 * We're using mask_on_assertion for MSI, so the device
+	 * automatically masks the interrupt when the interrupt is
+	 * generated. Later, when exiting polling, the interrupt
+	 * will be unmasked (see enic_poll).
+	 *
+	 * Also, the device uses the same PCIe Traffic Class (TC)
+	 * for Memory Write data and MSI, so there are no ordering
+	 * issues; the MSI will always arrive at the Root Complex
+	 * _after_ corresponding Memory Writes (i.e. descriptor
+	 * writes).
+	 */
+
+	netif_rx_schedule(enic->netdev, &enic->napi);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t enic_isr_msix_rq(int irq, void *data)
+{
+	struct enic *enic = data;
+
+	/* schedule NAPI polling for RQ cleanup */
+	netif_rx_schedule(enic->netdev, &enic->napi);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t enic_isr_msix_wq(int irq, void *data)
+{
+	struct enic *enic = data;
+	unsigned int wq_work_to_do = -1;	/* no limit */
+	unsigned int wq_work_done;
+
+	wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
+		wq_work_to_do, enic_wq_service, NULL);
+
+	vnic_intr_return_credits(&enic->intr[ENIC_MSIX_WQ],
+		wq_work_done,
+		1 /* unmask intr */,
+		1 /* reset intr timer */);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t enic_isr_msix_err(int irq, void *data)
+{
+	struct enic *enic = data;
+
+	enic_log_q_error(enic);
+
+	/* schedule recovery from WQ/RQ error */
+	schedule_work(&enic->reset);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t enic_isr_msix_notify(int irq, void *data)
+{
+	struct enic *enic = data;
+
+	enic_notify_check(enic);
+	vnic_intr_unmask(&enic->intr[ENIC_MSIX_NOTIFY]);
+
+	return IRQ_HANDLED;
+}
+
+static inline void enic_queue_wq_skb_cont(struct enic *enic,
+	struct vnic_wq *wq, struct sk_buff *skb,
+	unsigned int len_left)
+{
+	skb_frag_t *frag;
+
+	/* Queue additional data fragments */
+	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
+		len_left -= frag->size;
+		enic_queue_wq_desc_cont(wq, skb,
+			pci_map_page(enic->pdev, frag->page,
+				frag->page_offset, frag->size,
+				PCI_DMA_TODEVICE),
+			frag->size,
+			(len_left == 0));	/* EOP? */
+	}
+}
+
+static inline void enic_queue_wq_skb_vlan(struct enic *enic,
+	struct vnic_wq *wq, struct sk_buff *skb,
+	int vlan_tag_insert, unsigned int vlan_tag)
+{
+	unsigned int head_len = skb_headlen(skb);
+	unsigned int len_left = skb->len - head_len;
+	int eop = (len_left == 0);
+
+	/* Queue the main skb fragment */
+	enic_queue_wq_desc(wq, skb,
+		pci_map_single(enic->pdev, skb->data,
+			head_len, PCI_DMA_TODEVICE),
+		head_len,
+		vlan_tag_insert, vlan_tag,
+		eop);
+
+	if (!eop)
+		enic_queue_wq_skb_cont(enic, wq, skb, len_left);
+}
+
+static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
+	struct vnic_wq *wq, struct sk_buff *skb,
+	int vlan_tag_insert, unsigned int vlan_tag)
+{
+	unsigned int head_len = skb_headlen(skb);
+	unsigned int len_left = skb->len - head_len;
+	unsigned int hdr_len = skb_transport_offset(skb);
+	unsigned int csum_offset = hdr_len + skb->csum_offset;
+	int eop = (len_left == 0);
+
+	/* Queue the main skb fragment */
+	enic_queue_wq_desc_csum_l4(wq, skb,
+		pci_map_single(enic->pdev, skb->data,
+			head_len, PCI_DMA_TODEVICE),
+		head_len,
+		csum_offset,
+		hdr_len,
+		vlan_tag_insert, vlan_tag,
+		eop);
+
+	if (!eop)
+		enic_queue_wq_skb_cont(enic, wq, skb, len_left);
+}
+
+static inline void enic_queue_wq_skb_tso(struct enic *enic,
+	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
+	int vlan_tag_insert, unsigned int vlan_tag)
+{
+	unsigned int head_len = skb_headlen(skb);
+	unsigned int len_left = skb->len - head_len;
+	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	int eop = (len_left == 0);
+
+	/* Preload TCP csum field with IP pseudo hdr calculated
+	 * with IP length set to zero. HW will later add in length
+	 * to each TCP segment resulting from the TSO.
+	 */
+
+	if (skb->protocol == __constant_htons(ETH_P_IP)) {
+		ip_hdr(skb)->check = 0;
+		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+	} else if (skb->protocol == __constant_htons(ETH_P_IPV6)) {
+		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+	}
+
+	/* Queue the main skb fragment */
+	enic_queue_wq_desc_tso(wq, skb,
+		pci_map_single(enic->pdev, skb->data,
+			head_len, PCI_DMA_TODEVICE),
+		head_len,
+		mss, hdr_len,
+		vlan_tag_insert, vlan_tag,
+		eop);
+
+	if (!eop)
+		enic_queue_wq_skb_cont(enic, wq, skb, len_left);
+}
+
+static inline void enic_queue_wq_skb(struct enic *enic,
+	struct vnic_wq *wq, struct sk_buff *skb)
+{
+	unsigned int mss = skb_shinfo(skb)->gso_size;
+	unsigned int vlan_tag = 0;
+	int vlan_tag_insert = 0;
+
+	if (enic->vlan_group && vlan_tx_tag_present(skb)) {
+		/* VLAN tag from trunking driver */
+		vlan_tag_insert = 1;
+		vlan_tag = vlan_tx_tag_get(skb);
+	}
+
+	if (mss)
+		enic_queue_wq_skb_tso(enic, wq, skb, mss,
+			vlan_tag_insert, vlan_tag);
+	else if (skb->ip_summed == CHECKSUM_PARTIAL)
+		enic_queue_wq_skb_csum_l4(enic, wq, skb,
+			vlan_tag_insert, vlan_tag);
+	else
+		enic_queue_wq_skb_vlan(enic, wq, skb,
+			vlan_tag_insert, vlan_tag);
+}
+
+/* netif_tx_lock held, process context with BHs disabled */
+static int enic_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct enic *enic = netdev_priv(netdev);
+	struct vnic_wq *wq = &enic->wq[0];
+	unsigned long flags;
+
+	if (skb->len <= 0) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
+	 * which is very likely. In the off chance it's going to take
+	 * more than * ENIC_NON_TSO_MAX_DESC, linearize the skb.
+	 */
+
+	if (skb_shinfo(skb)->gso_size == 0 &&
+	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
+	    skb_linearize(skb)) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	spin_lock_irqsave(&enic->wq_lock[0], flags);
+
+	if (vnic_wq_desc_avail(wq) < skb_shinfo(skb)->nr_frags + 1) {
+		netif_stop_queue(netdev);
+		/* This is a hard error, log it */
+		printk(KERN_ERR PFX "%s: BUG! Tx ring full when "
+			"queue awake!\n", netdev->name);
+		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
+		return NETDEV_TX_BUSY;
+	}
+
+	enic_queue_wq_skb(enic, wq, skb);
+
+	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + 1)
+		netif_stop_queue(netdev);
+
+	netdev->trans_start = jiffies;
+
+	spin_unlock_irqrestore(&enic->wq_lock[0], flags);
+
+	return NETDEV_TX_OK;
+}
+
+/* dev_base_lock rwlock held, nominally process context */
+static struct net_device_stats *enic_get_stats(struct net_device *netdev)
+{
+	struct enic *enic = netdev_priv(netdev);
+	struct vnic_stats *stats;
+
+	spin_lock(&enic->devcmd_lock);
+	vnic_dev_stats_dump(enic->vdev, &stats);
+	spin_unlock(&enic->devcmd_lock);
+
+	enic->net_stats.tx_packets = stats->tx.tx_frames_ok;
+	enic->net_stats.tx_bytes = stats->tx.tx_bytes_ok;
+	enic->net_stats.tx_errors = stats->tx.tx_errors;
+	enic->net_stats.tx_dropped = stats->tx.tx_drops;
+
+	enic->net_stats.rx_packets = stats->rx.rx_frames_ok;
+	enic->net_stats.rx_bytes = stats->rx.rx_bytes_ok;
+	enic->net_stats.rx_errors = stats->rx.rx_errors;
+	enic->net_stats.multicast = stats->rx.rx_multicast_frames_ok;
+	enic->net_stats.rx_crc_errors = stats->rx.rx_crc_errors;
+	enic->net_stats.rx_dropped = stats->rx.rx_no_bufs;
+
+	return &enic->net_stats;
+}
+
+static void enic_reset_mcaddrs(struct enic *enic)
+{
+	enic->mc_count = 0;
+}
+
+static int enic_set_mac_addr(struct net_device *netdev, char *addr)
+{
+	if (!is_valid_ether_addr(addr))
+		return -EADDRNOTAVAIL;
+
+	memcpy(netdev->dev_addr, addr, netdev->addr_len);
+
+	return 0;
+}
+
+/* netif_tx_lock held, BHs disabled */
+static void enic_set_multicast_list(struct net_device *netdev)
+{
+	struct enic *enic = netdev_priv(netdev);
+	struct dev_mc_list *list = netdev->mc_list;
+	int directed = 1;
+	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
+	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
+	int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
+	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
+	    (netdev->mc_count > ENIC_MULTICAST_PERFECT_FILTERS);
+	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
+	unsigned int mc_count = netdev->mc_count;
+	unsigned int i, j;
+
+	if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS)
+		mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
+
+	spin_lock(&enic->devcmd_lock);
+
+	vnic_dev_packet_filter(enic->vdev, directed,
+		multicast, broadcast, promisc, allmulti);
+
+	/* Is there an easier way? Trying to minimize to
+	 * calls to add/del multicast addrs. We keep the
+	 * addrs from the last call in enic->mc_addr and
+	 * look for changes to add/del.
+	 */
+
+	for (i = 0; list && i < mc_count; i++) {
+		memcpy(mc_addr[i], list->dmi_addr, ETH_ALEN);
+		list = list->next;
+	}
+
+	for (i = 0; i < enic->mc_count; i++) {
+		for (j = 0; j < mc_count; j++)
+			if (compare_ether_addr(enic->mc_addr[i],
+				mc_addr[j]) == 0)
+				break;
+		if (j == mc_count)
+			enic_del_multicast_addr(enic, enic->mc_addr[i]);
+	}
+
+	for (i = 0; i < mc_count; i++) {
+		for (j = 0; j < enic->mc_count; j++)
+			if (compare_ether_addr(mc_addr[i],
+				enic->mc_addr[j]) == 0)
+				break;
+		if (j == enic->mc_count)
+			enic_add_multicast_addr(enic, mc_addr[i]);
+	}
+
+	/* Save the list to compare against next time
+	 */
+
+	for (i = 0; i < mc_count; i++)
+		memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);
+
+	enic->mc_count = mc_count;
+
+	spin_unlock(&enic->devcmd_lock);
+}
+
+/* rtnl lock is held */
+static void enic_vlan_rx_register(struct net_device *netdev,
+	struct vlan_group *vlan_group)
+{
+	struct enic *enic = netdev_priv(netdev);
+	enic->vlan_group = vlan_group;
+}
+
+/* rtnl lock is held */
+static void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+	struct enic *enic = netdev_priv(netdev);
+
+	spin_lock(&enic->devcmd_lock);
+	enic_add_vlan(enic, vid);
+	spin_unlock(&enic->devcmd_lock);
+}
+
+/* rtnl lock is held */
+static void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+	struct enic *enic = netdev_priv(netdev);
+
+	spin_lock(&enic->devcmd_lock);
+	enic_del_vlan(enic, vid);
+	spin_unlock(&enic->devcmd_lock);
+}
+
+/* netif_tx_lock held, BHs disabled */
+static void enic_tx_timeout(struct net_device *netdev)
+{
+	struct enic *enic = netdev_priv(netdev);
+	schedule_work(&enic->reset);
+}
+
+static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
+{
+	struct enic *enic = vnic_dev_priv(rq->vdev);
+
+	if (!buf->os_buf)
+		return;
+
+	pci_unmap_single(enic->pdev, buf->dma_addr,
+		buf->len, PCI_DMA_FROMDEVICE);
+	dev_kfree_skb_any(buf->os_buf);
+}
+
+static inline struct sk_buff *enic_rq_alloc_skb(unsigned int size)
+{
+	struct sk_buff *skb;
+
+	skb = dev_alloc_skb(size + NET_IP_ALIGN);
+
+	if (skb)
+		skb_reserve(skb, NET_IP_ALIGN);
+
+	return skb;
+}
+
+static int enic_rq_alloc_buf(struct vnic_rq *rq)
+{
+	struct enic *enic = vnic_dev_priv(rq->vdev);
+	struct sk_buff *skb;
+	unsigned int len = enic->netdev->mtu + ETH_HLEN;
+	unsigned int os_buf_index = 0;
+	dma_addr_t dma_addr;
+
+	skb = enic_rq_alloc_skb(len);
+	if (!skb)
+		return -ENOMEM;
+
+	dma_addr = pci_map_single(enic->pdev, skb->data,
+		len, PCI_DMA_FROMDEVICE);
+
+	enic_queue_rq_desc(rq, skb, os_buf_index,
+		dma_addr, len);
+
+	return 0;
+}
+
+static int enic_get_skb_header(struct sk_buff *skb, void **iphdr,
+	void **tcph, u64 *hdr_flags, void *priv)
+{
+	struct cq_enet_rq_desc *cq_desc = priv;
+	unsigned int ip_len;
+	struct iphdr *iph;
+
+	u8 type, color, eop, sop, ingress_port, vlan_stripped;
+	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
+	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
+	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
+	u8 packet_error;
+	u16 q_number, completed_index, bytes_written, vlan, checksum;
+	u32 rss_hash;
+
+	cq_enet_rq_desc_dec(cq_desc,
+		&type, &color, &q_number, &completed_index,
+		&ingress_port, &fcoe, &eop, &sop, &rss_type,
+		&csum_not_calc, &rss_hash, &bytes_written,
+		&packet_error, &vlan_stripped, &vlan, &checksum,
+		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
+		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
+		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
+		&fcs_ok);
+
+	if (!(ipv4 && tcp && !ipv4_fragment))
+		return -1;
+
+	skb_reset_network_header(skb);
+	iph = ip_hdr(skb);
+
+	ip_len = ip_hdrlen(skb);
+	skb_set_transport_header(skb, ip_len);
+
+	/* check if ip header and tcp header are complete */
+	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
+		return -1;
+
+	*hdr_flags = LRO_IPV4 | LRO_TCP;
+	*tcph = tcp_hdr(skb);
+	*iphdr = iph;
+
+	return 0;
+}
+
+static void enic_rq_indicate_buf(struct vnic_rq *rq,
+	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
+	int skipped, void *opaque)
+{
+	struct enic *enic = vnic_dev_priv(rq->vdev);
+	struct sk_buff *skb;
+
+	u8 type, color, eop, sop, ingress_port, vlan_stripped;
+	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
+	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
+	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
+	u8 packet_error;
+	u16 q_number, completed_index, bytes_written, vlan, checksum;
+	u32 rss_hash;
+
+	if (skipped)
+		return;
+
+	skb = buf->os_buf;
+	prefetch(skb->data - NET_IP_ALIGN);
+	pci_unmap_single(enic->pdev, buf->dma_addr,
+		buf->len, PCI_DMA_FROMDEVICE);
+
+	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
+		&type, &color, &q_number, &completed_index,
+		&ingress_port, &fcoe, &eop, &sop, &rss_type,
+		&csum_not_calc, &rss_hash, &bytes_written,
+		&packet_error, &vlan_stripped, &vlan, &checksum,
+		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
+		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
+		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
+		&fcs_ok);
+
+	if (packet_error) {
+
+		if (bytes_written > 0 && !fcs_ok) {
+			if (net_ratelimit())
+				printk(KERN_ERR PFX
+					"%s: packet error: bad FCS\n",
+					enic->netdev->name);
+		}
+
+		dev_kfree_skb_any(skb);
+
+		return;
+	}
+
+	if (eop && bytes_written > 0) {
+
+		/* Good receive
+		 */
+
+		skb_put(skb, bytes_written);
+		skb->protocol = eth_type_trans(skb, enic->netdev);
+
+		if (enic->csum_rx_enabled && !csum_not_calc) {
+			skb->csum = htons(checksum);
+			skb->ip_summed = CHECKSUM_COMPLETE;
+		}
+
+		skb->dev = enic->netdev;
+		enic->netdev->last_rx = jiffies;
+
+		if (enic->vlan_group && vlan_stripped) {
+
+			if (ENIC_SETTING(enic, LRO))
+				lro_vlan_hwaccel_receive_skb(&enic->lro_mgr,
+					skb, enic->vlan_group,
+					vlan, cq_desc);
+			else
+				vlan_hwaccel_receive_skb(skb,
+					enic->vlan_group, vlan);
+
+		} else {
+
+			if (ENIC_SETTING(enic, LRO))
+				lro_receive_skb(&enic->lro_mgr, skb, cq_desc);
+			else
+				netif_receive_skb(skb);
+
+		}
+
+	} else {
+
+		/* Buffer overflow
+		 */
+
+		dev_kfree_skb_any(skb);
+	}
+}
+
+static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+	u8 type, u16 q_number, u16 completed_index, void *opaque)
+{
+	struct enic *enic = vnic_dev_priv(vdev);
+
+	vnic_rq_service(&enic->rq[q_number], cq_desc,
+		completed_index, VNIC_RQ_RETURN_DESC,
+		enic_rq_indicate_buf, opaque);
+
+	return 0;
+}
+
+static void enic_rq_drop_buf(struct vnic_rq *rq,
+	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
+	int skipped, void *opaque)
+{
+	struct enic *enic = vnic_dev_priv(rq->vdev);
+	struct sk_buff *skb = buf->os_buf;
+
+	if (skipped)
+		return;
+
+	pci_unmap_single(enic->pdev, buf->dma_addr,
+		buf->len, PCI_DMA_FROMDEVICE);
+
+	dev_kfree_skb_any(skb);
+}
+
+static int enic_rq_service_drop(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+	u8 type, u16 q_number, u16 completed_index, void *opaque)
+{
+	struct enic *enic = vnic_dev_priv(vdev);
+
+	vnic_rq_service(&enic->rq[q_number], cq_desc,
+		completed_index, VNIC_RQ_RETURN_DESC,
+		enic_rq_drop_buf, opaque);
+
+	return 0;
+}
+
+static int enic_poll(struct napi_struct *napi, int budget)
+{
+	struct enic *enic = container_of(napi, struct enic, napi);
+	struct net_device *netdev = enic->netdev;
+	unsigned int rq_work_to_do = budget;
+	unsigned int wq_work_to_do = -1;	/* no limit */
+	unsigned int work_done, rq_work_done, wq_work_done;
+
+	/* Service RQ (first) and WQ
+	 */
+
+	rq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
+		rq_work_to_do, enic_rq_service, NULL);
+
+	wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
+		wq_work_to_do, enic_wq_service, NULL);
+
+	/* Accumulate intr event credits for this polling
+	 * cycle. An intr event is the completion of a
+	 * a WQ or RQ packet.
+	 */
+
+	work_done = rq_work_done + wq_work_done;
+
+	if (work_done > 0)
+		vnic_intr_return_credits(&enic->intr[ENIC_INTX_WQ_RQ],
+			work_done,
+			0 /* don't unmask intr */,
+			0 /* don't reset intr timer */);
+
+	if (rq_work_done > 0) {
+
+		/* Replenish RQ
+		 */
+
+		vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
+
+	} else {
+
+		/* If no work done, flush all LROs and exit polling
+		 */
+
+		if (ENIC_SETTING(enic, LRO))
+			lro_flush_all(&enic->lro_mgr);
+
+		netif_rx_complete(netdev, napi);
+		vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
+	}
+
+	return rq_work_done;
+}
+
+static int enic_poll_msix(struct napi_struct *napi, int budget)
+{
+	struct enic *enic = container_of(napi, struct enic, napi);
+	struct net_device *netdev = enic->netdev;
+	unsigned int work_to_do = budget;
+	unsigned int work_done;
+
+	/* Service RQ
+	 */
+
+	work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
+		work_to_do, enic_rq_service, NULL);
+
+	if (work_done > 0) {
+
+		/* Replenish RQ
+		 */
+
+		vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
+
+		/* Accumulate intr event credits for this polling
+		 * cycle. An intr event is the completion of a
+		 * a WQ or RQ packet.
+		 */
+
+		vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ],
+			work_done,
+			0 /* don't unmask intr */,
+			0 /* don't reset intr timer */);
+	} else {
+
+		/* If no work done, flush all LROs and exit polling
+		 */
+
+		if (ENIC_SETTING(enic, LRO))
+			lro_flush_all(&enic->lro_mgr);
+
+		netif_rx_complete(netdev, napi);
+		vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
+	}
+
+	return work_done;
+}
+
+static void enic_notify_timer(unsigned long data)
+{
+	struct enic *enic = (struct enic *)data;
+
+	enic_notify_check(enic);
+
+	mod_timer(&enic->notify_timer, round_jiffies(ENIC_NOTIFY_TIMER_PERIOD));
+}
+
+static void enic_free_intr(struct enic *enic)
+{
+	struct net_device *netdev = enic->netdev;
+	unsigned int i;
+
+	switch (vnic_dev_get_intr_mode(enic->vdev)) {
+	case VNIC_DEV_INTR_MODE_INTX:
+	case VNIC_DEV_INTR_MODE_MSI:
+		free_irq(enic->pdev->irq, netdev);
+		break;
+	case VNIC_DEV_INTR_MODE_MSIX:
+		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
+			if (enic->msix[i].requested)
+				free_irq(enic->msix_entry[i].vector,
+					enic->msix[i].devid);
+		break;
+	default:
+		break;
+	}
+}
+
+static int enic_request_intr(struct enic *enic)
+{
+	struct net_device *netdev = enic->netdev;
+	unsigned int i;
+	int err = 0;
+
+	switch (vnic_dev_get_intr_mode(enic->vdev)) {
+
+	case VNIC_DEV_INTR_MODE_INTX:
+
+		err = request_irq(enic->pdev->irq, enic_isr_legacy,
+			IRQF_SHARED, netdev->name, netdev);
+		break;
+
+	case VNIC_DEV_INTR_MODE_MSI:
+
+		err = request_irq(enic->pdev->irq, enic_isr_msi,
+			0, netdev->name, enic);
+		break;
+
+	case VNIC_DEV_INTR_MODE_MSIX:
+
+		sprintf(enic->msix[ENIC_MSIX_RQ].devname,
+			"%.11s-rx", netdev->name);
+		enic->msix[ENIC_MSIX_RQ].isr = enic_isr_msix_rq;
+		enic->msix[ENIC_MSIX_RQ].devid = enic;
+
+		sprintf(enic->msix[E
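
A note on the ethtool stats plumbing above: the ENIC_TX_STAT()/ENIC_RX_STAT() macros record each counter's position as an index into the firmware stats block viewed as a flat array of u64s (offsetof divided by 8, the size of a u64), which is what lets enic_get_ethtool_stats() walk vstats->tx and vstats->rx with a simple indexed load. A minimal standalone userspace sketch of the same name-to-slot technique follows; the demo_* struct and field names are illustrative, not the driver's.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical firmware-style stats block: every field is a u64. */
struct demo_tx_stats {
	uint64_t frames_ok;
	uint64_t bytes_ok;
	uint64_t drops;
};

struct demo_stat {
	const char *name;
	unsigned int offset;	/* index into the block viewed as u64[] */
};

#define DEMO_TX_STAT(f) \
	{ .name = #f, .offset = offsetof(struct demo_tx_stats, f) / 8 }

static const struct demo_stat demo_tx_tbl[] = {
	DEMO_TX_STAT(frames_ok),
	DEMO_TX_STAT(bytes_ok),
	DEMO_TX_STAT(drops),
};

int main(void)
{
	struct demo_tx_stats hw = { 100, 64000, 2 };
	const uint64_t *slots = (const uint64_t *)&hw;
	size_t i;

	/* Same walk enic_get_ethtool_stats() performs over vstats->tx */
	for (i = 0; i < sizeof(demo_tx_tbl) / sizeof(demo_tx_tbl[0]); i++)
		printf("%s: %llu\n", demo_tx_tbl[i].name,
			(unsigned long long)slots[demo_tx_tbl[i].offset]);

	return 0;
}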
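The TSO path in enic_queue_wq_skb_tso() seeds tcph->check with the one's-complement sum of the IP pseudo header computed with the length field set to zero (the ~csum_tcpudp_magic()/~csum_ipv6_magic() calls), so the hardware only has to fold in each segment's length and payload sum before the final inversion. A userspace sketch of the IPv4 case follows; csum_fold() and tso_seed_csum() are my names for illustration, not kernel API, and addresses are taken in host byte order to keep the arithmetic obvious.

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit accumulator into a 16-bit one's-complement sum. */
static uint16_t csum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Pseudo-header sum with the TCP length deliberately zero. No final
 * inversion here: the driver stores ~csum_tcpudp_magic(...), i.e. the
 * raw folded sum, so the NIC can keep accumulating the per-segment
 * length and payload before inverting once at the end. */
static uint16_t tso_seed_csum(uint32_t saddr, uint32_t daddr, uint8_t proto)
{
	uint32_t sum = 0;

	sum += saddr >> 16;
	sum += saddr & 0xffff;
	sum += daddr >> 16;
	sum += daddr & 0xffff;
	sum += proto;	/* zero-padded protocol word of the pseudo header */

	return csum_fold(sum);
}

int main(void)
{
	/* 192.168.0.1 -> 192.168.0.2, IPPROTO_TCP == 6 */
	printf("seed csum: 0x%04x\n",
		tso_seed_csum(0xc0a80001u, 0xc0a80002u, 6));
	return 0;
}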
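enic_set_multicast_list() avoids reprogramming the whole perfect-filter table on every call: it diffs the previous list (enic->mc_addr) against the current one with two nested O(n*m) scans, issuing a delete only for addresses that vanished and an add only for addresses that appeared. The same diffing idea in a standalone sketch, with filter_add()/filter_del() as stand-ins for the devcmd calls:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static void filter_del(const unsigned char *a)
{
	printf("del %02x:%02x:%02x:%02x:%02x:%02x\n",
		a[0], a[1], a[2], a[3], a[4], a[5]);
}

static void filter_add(const unsigned char *a)
{
	printf("add %02x:%02x:%02x:%02x:%02x:%02x\n",
		a[0], a[1], a[2], a[3], a[4], a[5]);
}

/* Delete entries only in prev[], add entries only in curr[]; addresses
 * present in both lists generate no device commands at all. */
static void sync_mc_filters(unsigned char prev[][ETH_ALEN], unsigned int n_prev,
	unsigned char curr[][ETH_ALEN], unsigned int n_curr)
{
	unsigned int i, j;

	for (i = 0; i < n_prev; i++) {
		for (j = 0; j < n_curr; j++)
			if (!memcmp(prev[i], curr[j], ETH_ALEN))
				break;
		if (j == n_curr)
			filter_del(prev[i]);
	}

	for (i = 0; i < n_curr; i++) {
		for (j = 0; j < n_prev; j++)
			if (!memcmp(curr[i], prev[j], ETH_ALEN))
				break;
		if (j == n_prev)
			filter_add(curr[i]);
	}
}

int main(void)
{
	unsigned char prev[][ETH_ALEN] = {
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x02 },
	};
	unsigned char curr[][ETH_ALEN] = {
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x02 },
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x03 },
	};

	sync_mc_filters(prev, 2, curr, 2);	/* expect: del ...:01, add ...:03 */
	return 0;
}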
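Finally, enic_poll()/enic_poll_msix() follow the standard NAPI contract paired with the vnic interrupt-credit scheme: service at most budget RX completions per pass, return credits without unmasking while work continues, and only on an idle pass exit polling and re-arm the interrupt. A device-agnostic sketch of that control flow, with all demo_* names hypothetical stand-ins for the vnic_cq/vnic_intr and netif_rx_complete calls:

#include <stdio.h>

/* Hypothetical device context; stand-in for struct enic. */
struct demo_ctx {
	int pending;	/* completions waiting in the queue */
};

static int demo_service_rx(struct demo_ctx *ctx, int budget)
{
	int done = ctx->pending < budget ? ctx->pending : budget;
	ctx->pending -= done;
	return done;
}

static void demo_return_credits(int n)
{
	printf("returned %d credits, intr stays masked\n", n);
}

static void demo_exit_polling_and_unmask(void)
{
	printf("exit polling, irq unmasked\n");
}

/* The enic_poll() shape: like the driver, this stays in polling mode
 * whenever a pass did any RX work, and only an idle pass re-arms the
 * device interrupt. */
static int demo_poll(struct demo_ctx *ctx, int budget)
{
	int done = demo_service_rx(ctx, budget);

	if (done > 0)
		demo_return_credits(done);	/* like vnic_intr_return_credits */
	else
		demo_exit_polling_and_unmask();	/* like netif_rx_complete + unmask */

	return done;	/* returning < budget signals the scheduler we idled */
}

int main(void)
{
	struct demo_ctx ctx = { .pending = 5 };

	/* Three passes with budget 4: done = 4, 1, then 0 (re-arm). */
	while (demo_poll(&ctx, 4) > 0)
		;
	return 0;
}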