Diffstat (limited to 'drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c')
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 3319
1 file changed, 2975 insertions(+), 344 deletions(-)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index e83d12c7bf2..a83271cf17c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -60,6 +60,7 @@ #include <linux/workqueue.h> #include <net/neighbour.h> #include <net/netevent.h> +#include <net/addrconf.h> #include <asm/uaccess.h> #include "cxgb4.h" @@ -68,8 +69,13 @@ #include "t4fw_api.h" #include "l2t.h" -#define DRV_VERSION "1.3.0-ko" -#define DRV_DESC "Chelsio T4 Network Driver" +#include <../drivers/net/bonding/bonding.h> + +#ifdef DRV_VERSION +#undef DRV_VERSION +#endif +#define DRV_VERSION "2.0.0-ko" +#define DRV_DESC "Chelsio T4/T5 Network Driver" /* * Max interrupt hold-off timer value in us. Queues fall back to this value @@ -78,28 +84,45 @@ */ #define MAX_SGE_TIMERVAL 200U -#ifdef CONFIG_PCI_IOV -/* - * Virtual Function provisioning constants. We need two extra Ingress Queues - * with Interrupt capability to serve as the VF's Firmware Event Queue and - * Forwarded Interrupt Queue (when using MSI mode) -- neither will have Free - * Lists associated with them). For each Ethernet/Control Egress Queue and - * for each Free List, we need an Egress Context. - */ enum { + /* + * Physical Function provisioning constants. + */ + PFRES_NVI = 4, /* # of Virtual Interfaces */ + PFRES_NETHCTRL = 128, /* # of EQs used for ETH or CTRL Qs */ + PFRES_NIQFLINT = 128, /* # of ingress Qs/w Free List(s)/intr + */ + PFRES_NEQ = 256, /* # of egress queues */ + PFRES_NIQ = 0, /* # of ingress queues */ + PFRES_TC = 0, /* PCI-E traffic class */ + PFRES_NEXACTF = 128, /* # of exact MPS filters */ + + PFRES_R_CAPS = FW_CMD_CAP_PF, + PFRES_WX_CAPS = FW_CMD_CAP_PF, + +#ifdef CONFIG_PCI_IOV + /* + * Virtual Function provisioning constants. We need two extra Ingress + * Queues with Interrupt capability to serve as the VF's Firmware + * Event Queue and Forwarded Interrupt Queue (when using MSI mode) -- + * neither will have Free Lists associated with them). For each + * Ethernet/Control Egress Queue and for each Free List, we need an + * Egress Context. + */ VFRES_NPORTS = 1, /* # of "ports" per VF */ VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */ VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */ VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */ VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */ - VFRES_NIQ = 0, /* # of non-fl/int ingress queues */ VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */ + VFRES_NIQ = 0, /* # of non-fl/int ingress queues */ VFRES_TC = 0, /* PCI-E traffic class */ VFRES_NEXACTF = 16, /* # of exact MPS filters */ VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT, VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF, +#endif }; /* @@ -146,16 +169,6 @@ static unsigned int pfvfres_pmask(struct adapter *adapter, } /*NOTREACHED*/ } -#endif - -enum { - MEMWIN0_APERTURE = 65536, - MEMWIN0_BASE = 0x30000, - MEMWIN1_APERTURE = 32768, - MEMWIN1_BASE = 0x28000, - MEMWIN2_APERTURE = 2048, - MEMWIN2_BASE = 0x1b800, -}; enum { MAX_TXQ_ENTRIES = 16384, @@ -168,6 +181,30 @@ enum { MIN_FL_ENTRIES = 16 }; +/* Host shadow copy of ingress filter entry. This is in host native format + * and doesn't match the ordering or bit order, etc. of the hardware of the + * firmware command. The use of bit-field structure elements is purely to + * remind ourselves of the field size limitations and save memory in the case + * where the filter table is large. 
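The VF constants above encode the sizing rules from the comment: each VF needs an interrupt-capable ingress queue per queue set plus two extras (the firmware event queue and the MSI forwarded-interrupt queue), and one egress context per Ethernet/control queue and per free list. A standalone restatement of that arithmetic (plain C, names local to this sketch):

#include <assert.h>	/* static_assert (C11) */

enum {
	NQSETS   = 2,		/* VFRES_NQSETS: "Queue Sets" per VF */
	NETHCTRL = NQSETS,	/* one ETH/CTRL egress queue per set */
	NIQFLINT = NQSETS + 2,	/* per-set ingress queues, plus the FW event
				 * queue and the forwarded-interrupt queue */
	NEQ      = NQSETS * 2,	/* egress contexts: one per ETH queue,
				 * one per free list */
};

static_assert(NIQFLINT == NETHCTRL + 2, "two extra interrupt-capable IQs");
static_assert(NEQ == NETHCTRL + NQSETS, "an egress context per EQ and FL");

int main(void)
{
	return 0;
}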
+ */ +struct filter_entry { + /* Administrative fields for filter. + */ + u32 valid:1; /* filter allocated and valid */ + u32 locked:1; /* filter is administratively locked */ + + u32 pending:1; /* filter action is pending firmware reply */ + u32 smtidx:8; /* Source MAC Table index for smac */ + struct l2t_entry *l2t; /* Layer Two Table entry for dmac */ + + /* The filter itself. Most of this is a straight copy of information + * provided by the extended ioctl(). Some fields are translated to + * internal forms -- for instance the Ingress Queue ID passed in from + * the ioctl() is translated into the Absolute Ingress Queue ID. + */ + struct ch_filter_specification fs; +}; + #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) @@ -196,17 +233,99 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = { CH_DEVICE(0x4408, 4), CH_DEVICE(0x4409, 4), CH_DEVICE(0x440a, 4), + CH_DEVICE(0x440d, 4), + CH_DEVICE(0x440e, 4), + CH_DEVICE(0x5001, 4), + CH_DEVICE(0x5002, 4), + CH_DEVICE(0x5003, 4), + CH_DEVICE(0x5004, 4), + CH_DEVICE(0x5005, 4), + CH_DEVICE(0x5006, 4), + CH_DEVICE(0x5007, 4), + CH_DEVICE(0x5008, 4), + CH_DEVICE(0x5009, 4), + CH_DEVICE(0x500A, 4), + CH_DEVICE(0x500B, 4), + CH_DEVICE(0x500C, 4), + CH_DEVICE(0x500D, 4), + CH_DEVICE(0x500E, 4), + CH_DEVICE(0x500F, 4), + CH_DEVICE(0x5010, 4), + CH_DEVICE(0x5011, 4), + CH_DEVICE(0x5012, 4), + CH_DEVICE(0x5013, 4), + CH_DEVICE(0x5014, 4), + CH_DEVICE(0x5015, 4), + CH_DEVICE(0x5080, 4), + CH_DEVICE(0x5081, 4), + CH_DEVICE(0x5082, 4), + CH_DEVICE(0x5083, 4), + CH_DEVICE(0x5084, 4), + CH_DEVICE(0x5085, 4), + CH_DEVICE(0x5401, 4), + CH_DEVICE(0x5402, 4), + CH_DEVICE(0x5403, 4), + CH_DEVICE(0x5404, 4), + CH_DEVICE(0x5405, 4), + CH_DEVICE(0x5406, 4), + CH_DEVICE(0x5407, 4), + CH_DEVICE(0x5408, 4), + CH_DEVICE(0x5409, 4), + CH_DEVICE(0x540A, 4), + CH_DEVICE(0x540B, 4), + CH_DEVICE(0x540C, 4), + CH_DEVICE(0x540D, 4), + CH_DEVICE(0x540E, 4), + CH_DEVICE(0x540F, 4), + CH_DEVICE(0x5410, 4), + CH_DEVICE(0x5411, 4), + CH_DEVICE(0x5412, 4), + CH_DEVICE(0x5413, 4), + CH_DEVICE(0x5414, 4), + CH_DEVICE(0x5415, 4), + CH_DEVICE(0x5480, 4), + CH_DEVICE(0x5481, 4), + CH_DEVICE(0x5482, 4), + CH_DEVICE(0x5483, 4), + CH_DEVICE(0x5484, 4), + CH_DEVICE(0x5485, 4), { 0, } }; -#define FW_FNAME "cxgb4/t4fw.bin" +#define FW4_FNAME "cxgb4/t4fw.bin" +#define FW5_FNAME "cxgb4/t5fw.bin" +#define FW4_CFNAME "cxgb4/t4-config.txt" +#define FW5_CFNAME "cxgb4/t5-config.txt" MODULE_DESCRIPTION(DRV_DESC); MODULE_AUTHOR("Chelsio Communications"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(DRV_VERSION); MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl); -MODULE_FIRMWARE(FW_FNAME); +MODULE_FIRMWARE(FW4_FNAME); +MODULE_FIRMWARE(FW5_FNAME); + +/* + * Normally we're willing to become the firmware's Master PF but will be happy + * if another PF has already become the Master and initialized the adapter. + * Setting "force_init" will cause this driver to forcibly establish itself as + * the Master PF and initialize the adapter. + */ +static uint force_init; + +module_param(force_init, uint, 0644); +MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter"); + +/* + * Normally if the firmware we connect to has Configuration File support, we + * use that and only fall back to the old Driver-based initialization if the + * Configuration File fails for some reason. If force_old_init is set, then + * we'll always use the old Driver-based initialization sequence. 
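The bit-fields in struct filter_entry above pack the administrative state (valid, locked, pending, and the 8-bit SMT index) into a single 32-bit word, which is the memory saving the comment refers to for large filter tables. A standalone illustration (plain C, a hypothetical mirror of just the admin fields, not the driver's own struct):

#include <stdint.h>
#include <stdio.h>

/* hypothetical mirror of just the administrative fields above */
struct filter_admin {
	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* administratively locked */
	uint32_t pending:1;	/* awaiting firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index */
};				/* 11 bits used of one 32-bit unit */

int main(void)
{
	/* 4 on the common ABIs: all four fields share one word */
	printf("sizeof(struct filter_admin) = %zu\n",
	       sizeof(struct filter_admin));
	return 0;
}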
+ */ +static uint force_old_init; + +module_param(force_old_init, uint, 0644); +MODULE_PARM_DESC(force_old_init, "Force old initialization sequence"); static int dflt_msg_enable = DFLT_MSG_ENABLE; @@ -243,22 +362,69 @@ module_param_array(intr_cnt, uint, NULL, 0644); MODULE_PARM_DESC(intr_cnt, "thresholds 1..3 for queue interrupt packet counters"); +/* + * Normally we tell the chip to deliver Ingress Packets into our DMA buffers + * offset by 2 bytes in order to have the IP headers line up on 4-byte + * boundaries. This is a requirement for many architectures which will throw + * a machine check fault if an attempt is made to access one of the 4-byte IP + * header fields on a non-4-byte boundary. And it's a major performance issue + * even on some architectures which allow it like some implementations of the + * x86 ISA. However, some architectures don't mind this and for some very + * edge-case performance sensitive applications (like forwarding large volumes + * of small packets), setting this DMA offset to 0 will decrease the number of + * PCI-E Bus transfers enough to measurably affect performance. + */ +static int rx_dma_offset = 2; + static bool vf_acls; #ifdef CONFIG_PCI_IOV module_param(vf_acls, bool, 0644); MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement"); -static unsigned int num_vf[4]; +/* Configure the number of PCI-E Virtual Function which are to be instantiated + * on SR-IOV Capable Physical Functions. + */ +static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV]; module_param_array(num_vf, uint, NULL, 0644); MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3"); #endif +/* + * The filter TCAM has a fixed portion and a variable portion. The fixed + * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP + * ports. The variable portion is 36 bits which can include things like Exact + * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits), + * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would + * far exceed the 36-bit budget for this "compressed" header portion of the + * filter. Thus, we have a scarce resource which must be carefully managed. 
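The rx_dma_offset reasoning above is simple arithmetic: with the default 2-byte offset, the 14-byte Ethernet header ends at byte 16, so the IP header lands on a 4-byte boundary; with offset 0 it lands at byte 14 and is misaligned. A standalone check (plain C):

#include <assert.h>	/* static_assert (C11) */

#define ETH_HLEN	14	/* Ethernet header length */
#define RX_DMA_OFFSET	2	/* driver default */

/* IP header offset in the DMA buffer = RX offset + Ethernet header */
static_assert((RX_DMA_OFFSET + ETH_HLEN) % 4 == 0,
	      "default offset 4-byte aligns the IP header (byte 16)");
static_assert((0 + ETH_HLEN) % 4 != 0,
	      "offset 0 leaves the IP header misaligned (byte 14)");

int main(void)
{
	return 0;
}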
+ * + * By default we set this up to mostly match the set of filter matching + * capabilities of T3 but with accommodations for some of T4's more + * interesting features: + * + * { IP Fragment (1), MPS Match Type (3), IP Protocol (8), + * [Inner] VLAN (17), Port (3), FCoE (1) } + */ +enum { + TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC, + TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT, + TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT, +}; + +static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT; + +module_param(tp_vlan_pri_map, uint, 0644); +MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration"); + static struct dentry *cxgb4_debugfs_root; static LIST_HEAD(adapter_list); static DEFINE_MUTEX(uld_mutex); +/* Adapter list to be accessed from atomic context */ +static LIST_HEAD(adap_rcu_list); +static DEFINE_SPINLOCK(adap_rcu_lock); static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX]; static const char *uld_str[] = { "RDMA", "iSCSI" }; @@ -273,15 +439,18 @@ static void link_report(struct net_device *dev) const struct port_info *p = netdev_priv(dev); switch (p->link_cfg.speed) { - case SPEED_10000: + case 10000: s = "10Gbps"; break; - case SPEED_1000: + case 1000: s = "1000Mbps"; break; - case SPEED_100: + case 100: s = "100Mbps"; break; + case 40000: + s = "40Gbps"; + break; } netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, @@ -369,6 +538,18 @@ static int set_addr_filters(const struct net_device *dev, bool sleep) uhash | mhash, sleep); } +int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */ +module_param(dbfifo_int_thresh, int, 0644); +MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold"); + +/* + * usecs to sleep while draining the dbfifo + */ +static int dbfifo_drain_delay = 1000; +module_param(dbfifo_drain_delay, int, 0644); +MODULE_PARM_DESC(dbfifo_drain_delay, + "usecs to sleep while draining the dbfifo"); + /* * Set Rx properties of a port, such as promiscruity, address filters, and MTU. * If @mtu is -1 it is left unchanged. @@ -387,6 +568,8 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) return ret; } +static struct workqueue_struct *workq; + /** * link_start - enable a port * @dev: the port to enable @@ -404,7 +587,7 @@ static int link_start(struct net_device *dev) * that step explicitly. */ ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1, - !!(dev->features & NETIF_F_HW_VLAN_RX), true); + !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true); if (ret == 0) { ret = t4_change_mac(pi->adapter, mb, pi->viid, pi->xact_addr_filt, dev->dev_addr, true, @@ -422,8 +605,67 @@ static int link_start(struct net_device *dev) return ret; } -/* - * Response queue handler for the FW event queue. +/* Clear a filter and release any of its resources that we own. This also + * clears the filter's "pending" status. + */ +static void clear_filter(struct adapter *adap, struct filter_entry *f) +{ + /* If the new or old filter have loopback rewriteing rules then we'll + * need to free any existing Layer Two Table (L2T) entries of the old + * filter rule. The firmware will handle freeing up any Source MAC + * Table (SMT) entries used for rewriting Source MAC Addresses in + * loopback rules. + */ + if (f->l2t) + cxgb4_l2t_release(f->l2t); + + /* The zeroing of the filter rule below clears the filter valid, + * pending, locked flags, l2t pointer, etc. so it's all we need for + * this operation. + */ + memset(f, 0, sizeof(*f)); +} + +/* Handle a filter write/deletion reply. 
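To make the 36-bit budget concrete: the default selection listed above sums to 33 bits (1 + 3 + 8 + 17 + 3 + 1), comfortably inside the limit, which is why this particular combination can be enabled all at once. A compile-time restatement (plain C, widths taken from the comment):

#include <assert.h>	/* static_assert (C11) */

enum {				/* field widths from the comment above */
	W_FCOE      = 1,
	W_PORT      = 3,
	W_VLAN      = 17,	/* [inner] VLAN tag */
	W_PROTOCOL  = 8,	/* IP protocol */
	W_MATCHTYPE = 3,	/* MPS match type */
	W_FRAG      = 1,	/* IP fragment */

	COMPRESSED_BITS = W_FCOE + W_PORT + W_VLAN + W_PROTOCOL +
			  W_MATCHTYPE + W_FRAG,
};

static_assert(COMPRESSED_BITS == 33, "default map uses 33 bits");
static_assert(COMPRESSED_BITS <= 36, "fits the compressed header budget");

int main(void)
{
	return 0;
}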
+ */ +static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl) +{ + unsigned int idx = GET_TID(rpl); + unsigned int nidx = idx - adap->tids.ftid_base; + unsigned int ret; + struct filter_entry *f; + + if (idx >= adap->tids.ftid_base && nidx < + (adap->tids.nftids + adap->tids.nsftids)) { + idx = nidx; + ret = GET_TCB_COOKIE(rpl->cookie); + f = &adap->tids.ftid_tab[idx]; + + if (ret == FW_FILTER_WR_FLT_DELETED) { + /* Clear the filter when we get confirmation from the + * hardware that the filter has been deleted. + */ + clear_filter(adap, f); + } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) { + dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n", + idx); + clear_filter(adap, f); + } else if (ret == FW_FILTER_WR_FLT_ADDED) { + f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff; + f->pending = 0; /* asynchronous setup completed */ + f->valid = 1; + } else { + /* Something went wrong. Issue a warning about the + * problem and clear everything out. + */ + dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n", + idx, ret); + clear_filter(adap, f); + } + } +} + +/* Response queue handler for the FW event queue. */ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, const struct pkt_gl *gl) @@ -431,6 +673,21 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, u8 opcode = ((const struct rss_header *)rsp)->opcode; rsp++; /* skip RSS header */ + + /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG. + */ + if (unlikely(opcode == CPL_FW4_MSG && + ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) { + rsp++; + opcode = ((const struct rss_header *)rsp)->opcode; + rsp++; + if (opcode != CPL_SGE_EGR_UPDATE) { + dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n" + , opcode); + goto out; + } + } + if (likely(opcode == CPL_SGE_EGR_UPDATE)) { const struct cpl_sge_egr_update *p = (void *)rsp; unsigned int qid = EGR_QID(ntohl(p->opcode_qid)); @@ -458,9 +715,14 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, const struct cpl_l2t_write_rpl *p = (void *)rsp; do_l2t_write_rpl(q->adap, p); + } else if (opcode == CPL_SET_TCB_RPL) { + const struct cpl_set_tcb_rpl *p = (void *)rsp; + + filter_rpl(q->adap, p); } else dev_err(q->adap->pdev_dev, "unexpected CPL %#x on FW event queue\n", opcode); +out: return 0; } @@ -478,6 +740,12 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp, { struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq); + /* FW can send CPLs encapsulated in a CPL_FW4_MSG. 
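On the FW_FILTER_WR_FLT_ADDED path above, filter_rpl() digs the Source MAC Table index out of bits 31:24 of the returned TCB old-value word. The same extraction on a sample value (plain C, a host-order stand-in for the be64_to_cpu() result):

#include <assert.h>
#include <stdint.h>

/* extract bits [31:24] of the 64-bit old-value word, as filter_rpl() does */
static unsigned int smt_idx(uint64_t oldval)
{
	return (oldval >> 24) & 0xff;
}

int main(void)
{
	/* sample value with 0x5a planted in bits [31:24] */
	uint64_t oldval = (0x5aULL << 24) | 0x123456;

	assert(smt_idx(oldval) == 0x5a);
	return 0;
}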
+ */ + if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG && + ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL) + rsp += 2; + if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) { rxq->stats.nomem++; return -1; @@ -550,12 +818,17 @@ static void name_msix_vecs(struct adapter *adap) for_each_rdmarxq(&adap->sge, i) snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d", adap->port[0]->name, i); + + for_each_rdmaciq(&adap->sge, i) + snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d", + adap->port[0]->name, i); } static int request_msix_queue_irqs(struct adapter *adap) { struct sge *s = &adap->sge; - int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2; + int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0; + int msi_index = 2; err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0, adap->msix_info[1].desc, &s->fw_evtq); @@ -563,56 +836,74 @@ static int request_msix_queue_irqs(struct adapter *adap) return err; for_each_ethrxq(s, ethqidx) { - err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, - adap->msix_info[msi].desc, + err = request_irq(adap->msix_info[msi_index].vec, + t4_sge_intr_msix, 0, + adap->msix_info[msi_index].desc, &s->ethrxq[ethqidx].rspq); if (err) goto unwind; - msi++; + msi_index++; } for_each_ofldrxq(s, ofldqidx) { - err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, - adap->msix_info[msi].desc, + err = request_irq(adap->msix_info[msi_index].vec, + t4_sge_intr_msix, 0, + adap->msix_info[msi_index].desc, &s->ofldrxq[ofldqidx].rspq); if (err) goto unwind; - msi++; + msi_index++; } for_each_rdmarxq(s, rdmaqidx) { - err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, - adap->msix_info[msi].desc, + err = request_irq(adap->msix_info[msi_index].vec, + t4_sge_intr_msix, 0, + adap->msix_info[msi_index].desc, &s->rdmarxq[rdmaqidx].rspq); if (err) goto unwind; - msi++; + msi_index++; + } + for_each_rdmaciq(s, rdmaciqqidx) { + err = request_irq(adap->msix_info[msi_index].vec, + t4_sge_intr_msix, 0, + adap->msix_info[msi_index].desc, + &s->rdmaciq[rdmaciqqidx].rspq); + if (err) + goto unwind; + msi_index++; } return 0; unwind: + while (--rdmaciqqidx >= 0) + free_irq(adap->msix_info[--msi_index].vec, + &s->rdmaciq[rdmaciqqidx].rspq); while (--rdmaqidx >= 0) - free_irq(adap->msix_info[--msi].vec, + free_irq(adap->msix_info[--msi_index].vec, &s->rdmarxq[rdmaqidx].rspq); while (--ofldqidx >= 0) - free_irq(adap->msix_info[--msi].vec, + free_irq(adap->msix_info[--msi_index].vec, &s->ofldrxq[ofldqidx].rspq); while (--ethqidx >= 0) - free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq); + free_irq(adap->msix_info[--msi_index].vec, + &s->ethrxq[ethqidx].rspq); free_irq(adap->msix_info[1].vec, &s->fw_evtq); return err; } static void free_msix_queue_irqs(struct adapter *adap) { - int i, msi = 2; + int i, msi_index = 2; struct sge *s = &adap->sge; free_irq(adap->msix_info[1].vec, &s->fw_evtq); for_each_ethrxq(s, i) - free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq); + free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq); for_each_ofldrxq(s, i) - free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq); + free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq); for_each_rdmarxq(s, i) - free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq); + free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq); + for_each_rdmaciq(s, i) + free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq); } /** @@ -775,7 +1066,8 @@ freeout: 
t4_free_sge_resources(adap); if (msi_idx > 0) msi_idx++; err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx, - &q->fl, uldrx_handler); + q->fl.size ? &q->fl : NULL, + uldrx_handler); if (err) goto freeout; memset(&q->stats, 0, sizeof(q->stats)); @@ -792,13 +1084,28 @@ freeout: t4_free_sge_resources(adap); if (msi_idx > 0) msi_idx++; err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i], - msi_idx, &q->fl, uldrx_handler); + msi_idx, q->fl.size ? &q->fl : NULL, + uldrx_handler); if (err) goto freeout; memset(&q->stats, 0, sizeof(q->stats)); s->rdma_rxq[i] = q->rspq.abs_id; } + for_each_rdmaciq(s, i) { + struct sge_ofld_rxq *q = &s->rdmaciq[i]; + + if (msi_idx > 0) + msi_idx++; + err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i], + msi_idx, q->fl.size ? &q->fl : NULL, + uldrx_handler); + if (err) + goto freeout; + memset(&q->stats, 0, sizeof(q->stats)); + s->rdma_ciq[i] = q->rspq.abs_id; + } + for_each_port(adap, i) { /* * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't @@ -818,52 +1125,12 @@ freeout: t4_free_sge_resources(adap); } /* - * Returns 0 if new FW was successfully loaded, a positive errno if a load was - * started but failed, and a negative errno if flash load couldn't start. - */ -static int upgrade_fw(struct adapter *adap) -{ - int ret; - u32 vers; - const struct fw_hdr *hdr; - const struct firmware *fw; - struct device *dev = adap->pdev_dev; - - ret = request_firmware(&fw, FW_FNAME, dev); - if (ret < 0) { - dev_err(dev, "unable to load firmware image " FW_FNAME - ", error %d\n", ret); - return ret; - } - - hdr = (const struct fw_hdr *)fw->data; - vers = ntohl(hdr->fw_ver); - if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) { - ret = -EINVAL; /* wrong major version, won't do */ - goto out; - } - - /* - * If the flash FW is unusable or we found something newer, load it. - */ - if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR || - vers > adap->params.fw_vers) { - ret = -t4_load_fw(adap, fw->data, fw->size); - if (!ret) - dev_info(dev, "firmware upgraded to version %pI4 from " - FW_FNAME "\n", &hdr->fw_ver); - } -out: release_firmware(fw); - return ret; -} - -/* * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc. * The allocated memory is cleared. */ void *t4_alloc_mem(size_t size) { - void *p = kzalloc(size, GFP_KERNEL); + void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); if (!p) p = vzalloc(size); @@ -881,6 +1148,148 @@ static void t4_free_mem(void *addr) kfree(addr); } +/* Send a Work Request to write the filter at a specified index. We construct + * a Firmware Filter Work Request to have the work done and put the indicated + * filter into "pending" mode which will prevent any further actions against + * it till we get a reply from the firmware on the completion status of the + * request. + */ +static int set_filter_wr(struct adapter *adapter, int fidx) +{ + struct filter_entry *f = &adapter->tids.ftid_tab[fidx]; + struct sk_buff *skb; + struct fw_filter_wr *fwr; + unsigned int ftid; + + /* If the new filter requires loopback Destination MAC and/or VLAN + * rewriting then we need to allocate a Layer 2 Table (L2T) entry for + * the filter. 
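t4_alloc_mem() above pairs a quiet kzalloc() attempt (__GFP_NOWARN suppresses the allocation-failure splat that large tables would otherwise trigger) with a vzalloc() fallback, leaving the matching t4_free_mem() to pick the right release primitive for whichever path produced the pointer. A user-space analogue of the pattern (sketch only: calloc stands in for both allocators, and the size gate fakes the contiguous path failing for large requests):

#include <assert.h>
#include <stdlib.h>

/* try the preferred allocator, quietly fall back to the second */
static void *alloc_mem(size_t size, int *from_fallback)
{
	void *p = size <= 4096 ? calloc(1, size) : NULL;

	*from_fallback = 0;
	if (!p) {			/* the vzalloc() role */
		p = calloc(1, size);
		*from_fallback = 1;
	}
	return p;			/* zeroed either way */
}

int main(void)
{
	int fb;
	void *small = alloc_mem(64, &fb);

	assert(small && fb == 0);	/* fast path */
	free(small);

	void *big = alloc_mem(1 << 20, &fb);
	assert(big && fb == 1);		/* fallback path */
	free(big);
	return 0;
}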
+ */ + if (f->fs.newdmac || f->fs.newvlan) { + /* allocate L2T entry for new filter */ + f->l2t = t4_l2t_alloc_switching(adapter->l2t); + if (f->l2t == NULL) + return -EAGAIN; + if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan, + f->fs.eport, f->fs.dmac)) { + cxgb4_l2t_release(f->l2t); + f->l2t = NULL; + return -ENOMEM; + } + } + + ftid = adapter->tids.ftid_base + fidx; + + skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL); + fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr)); + memset(fwr, 0, sizeof(*fwr)); + + /* It would be nice to put most of the following in t4_hw.c but most + * of the work is translating the cxgbtool ch_filter_specification + * into the Work Request and the definition of that structure is + * currently in cxgbtool.h which isn't appropriate to pull into the + * common code. We may eventually try to come up with a more neutral + * filter specification structure but for now it's easiest to simply + * put this fairly direct code in line ... + */ + fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR)); + fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16)); + fwr->tid_to_iq = + htonl(V_FW_FILTER_WR_TID(ftid) | + V_FW_FILTER_WR_RQTYPE(f->fs.type) | + V_FW_FILTER_WR_NOREPLY(0) | + V_FW_FILTER_WR_IQ(f->fs.iq)); + fwr->del_filter_to_l2tix = + htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) | + V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) | + V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) | + V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) | + V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) | + V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) | + V_FW_FILTER_WR_DMAC(f->fs.newdmac) | + V_FW_FILTER_WR_SMAC(f->fs.newsmac) | + V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT || + f->fs.newvlan == VLAN_REWRITE) | + V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE || + f->fs.newvlan == VLAN_REWRITE) | + V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) | + V_FW_FILTER_WR_TXCHAN(f->fs.eport) | + V_FW_FILTER_WR_PRIO(f->fs.prio) | + V_FW_FILTER_WR_L2TIX(f->l2t ? 
f->l2t->idx : 0)); + fwr->ethtype = htons(f->fs.val.ethtype); + fwr->ethtypem = htons(f->fs.mask.ethtype); + fwr->frag_to_ovlan_vldm = + (V_FW_FILTER_WR_FRAG(f->fs.val.frag) | + V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) | + V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) | + V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) | + V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) | + V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld)); + fwr->smac_sel = 0; + fwr->rx_chan_rx_rpl_iq = + htons(V_FW_FILTER_WR_RX_CHAN(0) | + V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id)); + fwr->maci_to_matchtypem = + htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) | + V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) | + V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) | + V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) | + V_FW_FILTER_WR_PORT(f->fs.val.iport) | + V_FW_FILTER_WR_PORTM(f->fs.mask.iport) | + V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) | + V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype)); + fwr->ptcl = f->fs.val.proto; + fwr->ptclm = f->fs.mask.proto; + fwr->ttyp = f->fs.val.tos; + fwr->ttypm = f->fs.mask.tos; + fwr->ivlan = htons(f->fs.val.ivlan); + fwr->ivlanm = htons(f->fs.mask.ivlan); + fwr->ovlan = htons(f->fs.val.ovlan); + fwr->ovlanm = htons(f->fs.mask.ovlan); + memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip)); + memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm)); + memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip)); + memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm)); + fwr->lp = htons(f->fs.val.lport); + fwr->lpm = htons(f->fs.mask.lport); + fwr->fp = htons(f->fs.val.fport); + fwr->fpm = htons(f->fs.mask.fport); + if (f->fs.newsmac) + memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma)); + + /* Mark the filter as "pending" and ship off the Filter Work Request. + * When we get the Work Request Reply we'll clear the pending status. + */ + f->pending = 1; + set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3); + t4_ofld_send(adapter, skb); + return 0; +} + +/* Delete the filter at a specified index. + */ +static int del_filter_wr(struct adapter *adapter, int fidx) +{ + struct filter_entry *f = &adapter->tids.ftid_tab[fidx]; + struct sk_buff *skb; + struct fw_filter_wr *fwr; + unsigned int len, ftid; + + len = sizeof(*fwr); + ftid = adapter->tids.ftid_base + fidx; + + skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL); + fwr = (struct fw_filter_wr *)__skb_put(skb, len); + t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id); + + /* Mark the filter as "pending" and ship off the Filter Work Request. + * When we get the Work Request Reply we'll clear the pending status. 
+ */ + f->pending = 1; + t4_mgmt_tx(adapter, skb); + return 0; +} + static inline int is_offload(const struct adapter *adap) { return adap->params.offload; @@ -974,6 +1383,8 @@ static char stats_strings[][ETH_GSTRING_LEN] = { "VLANinsertions ", "GROpackets ", "GROmerged ", + "WriteCoalSuccess ", + "WriteCoalFail ", }; static int get_sset_count(struct net_device *dev, int sset) @@ -987,10 +1398,15 @@ static int get_sset_count(struct net_device *dev, int sset) } #define T4_REGMAP_SIZE (160 * 1024) +#define T5_REGMAP_SIZE (332 * 1024) static int get_regs_len(struct net_device *dev) { - return T4_REGMAP_SIZE; + struct adapter *adap = netdev2adap(dev); + if (is_t4(adap->params.chip)) + return T4_REGMAP_SIZE; + else + return T5_REGMAP_SIZE; } static int get_eeprom_len(struct net_device *dev) @@ -1064,11 +1480,25 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats, { struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; + u32 val1, val2; t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data); data += sizeof(struct port_stats) / sizeof(u64); collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); + data += sizeof(struct queue_port_stats) / sizeof(u64); + if (!is_t4(adapter->params.chip)) { + t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7)); + val1 = t4_read_reg(adapter, SGE_STAT_TOTAL); + val2 = t4_read_reg(adapter, SGE_STAT_MATCH); + *data = val1 - val2; + data++; + *data = val2; + data++; + } else { + memset(data, 0, 2 * sizeof(u64)); + *data += 2; + } } /* @@ -1079,7 +1509,8 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats, */ static inline unsigned int mk_adap_vers(const struct adapter *ap) { - return 4 | (ap->params.rev << 10) | (1 << 16); + return CHELSIO_CHIP_VERSION(ap->params.chip) | + (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16); } static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start, @@ -1094,7 +1525,7 @@ static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start, static void get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) { - static const unsigned int reg_ranges[] = { + static const unsigned int t4_reg_ranges[] = { 0x1008, 0x1108, 0x1180, 0x11b4, 0x11fc, 0x123c, @@ -1314,13 +1745,452 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs, 0x27e00, 0x27e04 }; + static const unsigned int t5_reg_ranges[] = { + 0x1008, 0x1148, + 0x1180, 0x11b4, + 0x11fc, 0x123c, + 0x1280, 0x173c, + 0x1800, 0x18fc, + 0x3000, 0x3028, + 0x3060, 0x30d8, + 0x30e0, 0x30fc, + 0x3140, 0x357c, + 0x35a8, 0x35cc, + 0x35ec, 0x35ec, + 0x3600, 0x5624, + 0x56cc, 0x575c, + 0x580c, 0x5814, + 0x5890, 0x58bc, + 0x5940, 0x59dc, + 0x59fc, 0x5a18, + 0x5a60, 0x5a9c, + 0x5b9c, 0x5bfc, + 0x6000, 0x6040, + 0x6058, 0x614c, + 0x7700, 0x7798, + 0x77c0, 0x78fc, + 0x7b00, 0x7c54, + 0x7d00, 0x7efc, + 0x8dc0, 0x8de0, + 0x8df8, 0x8e84, + 0x8ea0, 0x8f84, + 0x8fc0, 0x90f8, + 0x9400, 0x9470, + 0x9600, 0x96f4, + 0x9800, 0x9808, + 0x9820, 0x983c, + 0x9850, 0x9864, + 0x9c00, 0x9c6c, + 0x9c80, 0x9cec, + 0x9d00, 0x9d6c, + 0x9d80, 0x9dec, + 0x9e00, 0x9e6c, + 0x9e80, 0x9eec, + 0x9f00, 0x9f6c, + 0x9f80, 0xa020, + 0xd004, 0xd03c, + 0xdfc0, 0xdfe0, + 0xe000, 0x11088, + 0x1109c, 0x1117c, + 0x11190, 0x11204, + 0x19040, 0x1906c, + 0x19078, 0x19080, + 0x1908c, 0x19124, + 0x19150, 0x191b0, + 0x191d0, 0x191e8, + 0x19238, 0x19290, + 0x193f8, 0x19474, + 0x19490, 0x194cc, + 0x194f0, 0x194f8, + 0x19c00, 0x19c60, + 0x19c94, 0x19e10, + 0x19e50, 0x19f34, + 
0x19f40, 0x19f50, + 0x19f90, 0x19fe4, + 0x1a000, 0x1a06c, + 0x1a0b0, 0x1a120, + 0x1a128, 0x1a138, + 0x1a190, 0x1a1c4, + 0x1a1fc, 0x1a1fc, + 0x1e008, 0x1e00c, + 0x1e040, 0x1e04c, + 0x1e284, 0x1e290, + 0x1e2c0, 0x1e2c0, + 0x1e2e0, 0x1e2e0, + 0x1e300, 0x1e384, + 0x1e3c0, 0x1e3c8, + 0x1e408, 0x1e40c, + 0x1e440, 0x1e44c, + 0x1e684, 0x1e690, + 0x1e6c0, 0x1e6c0, + 0x1e6e0, 0x1e6e0, + 0x1e700, 0x1e784, + 0x1e7c0, 0x1e7c8, + 0x1e808, 0x1e80c, + 0x1e840, 0x1e84c, + 0x1ea84, 0x1ea90, + 0x1eac0, 0x1eac0, + 0x1eae0, 0x1eae0, + 0x1eb00, 0x1eb84, + 0x1ebc0, 0x1ebc8, + 0x1ec08, 0x1ec0c, + 0x1ec40, 0x1ec4c, + 0x1ee84, 0x1ee90, + 0x1eec0, 0x1eec0, + 0x1eee0, 0x1eee0, + 0x1ef00, 0x1ef84, + 0x1efc0, 0x1efc8, + 0x1f008, 0x1f00c, + 0x1f040, 0x1f04c, + 0x1f284, 0x1f290, + 0x1f2c0, 0x1f2c0, + 0x1f2e0, 0x1f2e0, + 0x1f300, 0x1f384, + 0x1f3c0, 0x1f3c8, + 0x1f408, 0x1f40c, + 0x1f440, 0x1f44c, + 0x1f684, 0x1f690, + 0x1f6c0, 0x1f6c0, + 0x1f6e0, 0x1f6e0, + 0x1f700, 0x1f784, + 0x1f7c0, 0x1f7c8, + 0x1f808, 0x1f80c, + 0x1f840, 0x1f84c, + 0x1fa84, 0x1fa90, + 0x1fac0, 0x1fac0, + 0x1fae0, 0x1fae0, + 0x1fb00, 0x1fb84, + 0x1fbc0, 0x1fbc8, + 0x1fc08, 0x1fc0c, + 0x1fc40, 0x1fc4c, + 0x1fe84, 0x1fe90, + 0x1fec0, 0x1fec0, + 0x1fee0, 0x1fee0, + 0x1ff00, 0x1ff84, + 0x1ffc0, 0x1ffc8, + 0x30000, 0x30030, + 0x30100, 0x30144, + 0x30190, 0x301d0, + 0x30200, 0x30318, + 0x30400, 0x3052c, + 0x30540, 0x3061c, + 0x30800, 0x30834, + 0x308c0, 0x30908, + 0x30910, 0x309ac, + 0x30a00, 0x30a04, + 0x30a0c, 0x30a2c, + 0x30a44, 0x30a50, + 0x30a74, 0x30c24, + 0x30d08, 0x30d14, + 0x30d1c, 0x30d20, + 0x30d3c, 0x30d50, + 0x31200, 0x3120c, + 0x31220, 0x31220, + 0x31240, 0x31240, + 0x31600, 0x31600, + 0x31608, 0x3160c, + 0x31a00, 0x31a1c, + 0x31e04, 0x31e20, + 0x31e38, 0x31e3c, + 0x31e80, 0x31e80, + 0x31e88, 0x31ea8, + 0x31eb0, 0x31eb4, + 0x31ec8, 0x31ed4, + 0x31fb8, 0x32004, + 0x32208, 0x3223c, + 0x32600, 0x32630, + 0x32a00, 0x32abc, + 0x32b00, 0x32b70, + 0x33000, 0x33048, + 0x33060, 0x3309c, + 0x330f0, 0x33148, + 0x33160, 0x3319c, + 0x331f0, 0x332e4, + 0x332f8, 0x333e4, + 0x333f8, 0x33448, + 0x33460, 0x3349c, + 0x334f0, 0x33548, + 0x33560, 0x3359c, + 0x335f0, 0x336e4, + 0x336f8, 0x337e4, + 0x337f8, 0x337fc, + 0x33814, 0x33814, + 0x3382c, 0x3382c, + 0x33880, 0x3388c, + 0x338e8, 0x338ec, + 0x33900, 0x33948, + 0x33960, 0x3399c, + 0x339f0, 0x33ae4, + 0x33af8, 0x33b10, + 0x33b28, 0x33b28, + 0x33b3c, 0x33b50, + 0x33bf0, 0x33c10, + 0x33c28, 0x33c28, + 0x33c3c, 0x33c50, + 0x33cf0, 0x33cfc, + 0x34000, 0x34030, + 0x34100, 0x34144, + 0x34190, 0x341d0, + 0x34200, 0x34318, + 0x34400, 0x3452c, + 0x34540, 0x3461c, + 0x34800, 0x34834, + 0x348c0, 0x34908, + 0x34910, 0x349ac, + 0x34a00, 0x34a04, + 0x34a0c, 0x34a2c, + 0x34a44, 0x34a50, + 0x34a74, 0x34c24, + 0x34d08, 0x34d14, + 0x34d1c, 0x34d20, + 0x34d3c, 0x34d50, + 0x35200, 0x3520c, + 0x35220, 0x35220, + 0x35240, 0x35240, + 0x35600, 0x35600, + 0x35608, 0x3560c, + 0x35a00, 0x35a1c, + 0x35e04, 0x35e20, + 0x35e38, 0x35e3c, + 0x35e80, 0x35e80, + 0x35e88, 0x35ea8, + 0x35eb0, 0x35eb4, + 0x35ec8, 0x35ed4, + 0x35fb8, 0x36004, + 0x36208, 0x3623c, + 0x36600, 0x36630, + 0x36a00, 0x36abc, + 0x36b00, 0x36b70, + 0x37000, 0x37048, + 0x37060, 0x3709c, + 0x370f0, 0x37148, + 0x37160, 0x3719c, + 0x371f0, 0x372e4, + 0x372f8, 0x373e4, + 0x373f8, 0x37448, + 0x37460, 0x3749c, + 0x374f0, 0x37548, + 0x37560, 0x3759c, + 0x375f0, 0x376e4, + 0x376f8, 0x377e4, + 0x377f8, 0x377fc, + 0x37814, 0x37814, + 0x3782c, 0x3782c, + 0x37880, 0x3788c, + 0x378e8, 0x378ec, + 0x37900, 0x37948, + 0x37960, 0x3799c, + 0x379f0, 0x37ae4, + 0x37af8, 0x37b10, + 0x37b28, 
0x37b28, + 0x37b3c, 0x37b50, + 0x37bf0, 0x37c10, + 0x37c28, 0x37c28, + 0x37c3c, 0x37c50, + 0x37cf0, 0x37cfc, + 0x38000, 0x38030, + 0x38100, 0x38144, + 0x38190, 0x381d0, + 0x38200, 0x38318, + 0x38400, 0x3852c, + 0x38540, 0x3861c, + 0x38800, 0x38834, + 0x388c0, 0x38908, + 0x38910, 0x389ac, + 0x38a00, 0x38a04, + 0x38a0c, 0x38a2c, + 0x38a44, 0x38a50, + 0x38a74, 0x38c24, + 0x38d08, 0x38d14, + 0x38d1c, 0x38d20, + 0x38d3c, 0x38d50, + 0x39200, 0x3920c, + 0x39220, 0x39220, + 0x39240, 0x39240, + 0x39600, 0x39600, + 0x39608, 0x3960c, + 0x39a00, 0x39a1c, + 0x39e04, 0x39e20, + 0x39e38, 0x39e3c, + 0x39e80, 0x39e80, + 0x39e88, 0x39ea8, + 0x39eb0, 0x39eb4, + 0x39ec8, 0x39ed4, + 0x39fb8, 0x3a004, + 0x3a208, 0x3a23c, + 0x3a600, 0x3a630, + 0x3aa00, 0x3aabc, + 0x3ab00, 0x3ab70, + 0x3b000, 0x3b048, + 0x3b060, 0x3b09c, + 0x3b0f0, 0x3b148, + 0x3b160, 0x3b19c, + 0x3b1f0, 0x3b2e4, + 0x3b2f8, 0x3b3e4, + 0x3b3f8, 0x3b448, + 0x3b460, 0x3b49c, + 0x3b4f0, 0x3b548, + 0x3b560, 0x3b59c, + 0x3b5f0, 0x3b6e4, + 0x3b6f8, 0x3b7e4, + 0x3b7f8, 0x3b7fc, + 0x3b814, 0x3b814, + 0x3b82c, 0x3b82c, + 0x3b880, 0x3b88c, + 0x3b8e8, 0x3b8ec, + 0x3b900, 0x3b948, + 0x3b960, 0x3b99c, + 0x3b9f0, 0x3bae4, + 0x3baf8, 0x3bb10, + 0x3bb28, 0x3bb28, + 0x3bb3c, 0x3bb50, + 0x3bbf0, 0x3bc10, + 0x3bc28, 0x3bc28, + 0x3bc3c, 0x3bc50, + 0x3bcf0, 0x3bcfc, + 0x3c000, 0x3c030, + 0x3c100, 0x3c144, + 0x3c190, 0x3c1d0, + 0x3c200, 0x3c318, + 0x3c400, 0x3c52c, + 0x3c540, 0x3c61c, + 0x3c800, 0x3c834, + 0x3c8c0, 0x3c908, + 0x3c910, 0x3c9ac, + 0x3ca00, 0x3ca04, + 0x3ca0c, 0x3ca2c, + 0x3ca44, 0x3ca50, + 0x3ca74, 0x3cc24, + 0x3cd08, 0x3cd14, + 0x3cd1c, 0x3cd20, + 0x3cd3c, 0x3cd50, + 0x3d200, 0x3d20c, + 0x3d220, 0x3d220, + 0x3d240, 0x3d240, + 0x3d600, 0x3d600, + 0x3d608, 0x3d60c, + 0x3da00, 0x3da1c, + 0x3de04, 0x3de20, + 0x3de38, 0x3de3c, + 0x3de80, 0x3de80, + 0x3de88, 0x3dea8, + 0x3deb0, 0x3deb4, + 0x3dec8, 0x3ded4, + 0x3dfb8, 0x3e004, + 0x3e208, 0x3e23c, + 0x3e600, 0x3e630, + 0x3ea00, 0x3eabc, + 0x3eb00, 0x3eb70, + 0x3f000, 0x3f048, + 0x3f060, 0x3f09c, + 0x3f0f0, 0x3f148, + 0x3f160, 0x3f19c, + 0x3f1f0, 0x3f2e4, + 0x3f2f8, 0x3f3e4, + 0x3f3f8, 0x3f448, + 0x3f460, 0x3f49c, + 0x3f4f0, 0x3f548, + 0x3f560, 0x3f59c, + 0x3f5f0, 0x3f6e4, + 0x3f6f8, 0x3f7e4, + 0x3f7f8, 0x3f7fc, + 0x3f814, 0x3f814, + 0x3f82c, 0x3f82c, + 0x3f880, 0x3f88c, + 0x3f8e8, 0x3f8ec, + 0x3f900, 0x3f948, + 0x3f960, 0x3f99c, + 0x3f9f0, 0x3fae4, + 0x3faf8, 0x3fb10, + 0x3fb28, 0x3fb28, + 0x3fb3c, 0x3fb50, + 0x3fbf0, 0x3fc10, + 0x3fc28, 0x3fc28, + 0x3fc3c, 0x3fc50, + 0x3fcf0, 0x3fcfc, + 0x40000, 0x4000c, + 0x40040, 0x40068, + 0x40080, 0x40144, + 0x40180, 0x4018c, + 0x40200, 0x40298, + 0x402ac, 0x4033c, + 0x403f8, 0x403fc, + 0x41304, 0x413c4, + 0x41400, 0x4141c, + 0x41480, 0x414d0, + 0x44000, 0x44078, + 0x440c0, 0x44278, + 0x442c0, 0x44478, + 0x444c0, 0x44678, + 0x446c0, 0x44878, + 0x448c0, 0x449fc, + 0x45000, 0x45068, + 0x45080, 0x45084, + 0x450a0, 0x450b0, + 0x45200, 0x45268, + 0x45280, 0x45284, + 0x452a0, 0x452b0, + 0x460c0, 0x460e4, + 0x47000, 0x4708c, + 0x47200, 0x47250, + 0x47400, 0x47420, + 0x47600, 0x47618, + 0x47800, 0x47814, + 0x48000, 0x4800c, + 0x48040, 0x48068, + 0x48080, 0x48144, + 0x48180, 0x4818c, + 0x48200, 0x48298, + 0x482ac, 0x4833c, + 0x483f8, 0x483fc, + 0x49304, 0x493c4, + 0x49400, 0x4941c, + 0x49480, 0x494d0, + 0x4c000, 0x4c078, + 0x4c0c0, 0x4c278, + 0x4c2c0, 0x4c478, + 0x4c4c0, 0x4c678, + 0x4c6c0, 0x4c878, + 0x4c8c0, 0x4c9fc, + 0x4d000, 0x4d068, + 0x4d080, 0x4d084, + 0x4d0a0, 0x4d0b0, + 0x4d200, 0x4d268, + 0x4d280, 0x4d284, + 0x4d2a0, 0x4d2b0, + 0x4e0c0, 0x4e0e4, + 0x4f000, 0x4f08c, + 
0x4f200, 0x4f250, + 0x4f400, 0x4f420, + 0x4f600, 0x4f618, + 0x4f800, 0x4f814, + 0x50000, 0x500cc, + 0x50400, 0x50400, + 0x50800, 0x508cc, + 0x50c00, 0x50c00, + 0x51000, 0x5101c, + 0x51300, 0x51308, + }; + int i; struct adapter *ap = netdev2adap(dev); + static const unsigned int *reg_ranges; + int arr_size = 0, buf_size = 0; + + if (is_t4(ap->params.chip)) { + reg_ranges = &t4_reg_ranges[0]; + arr_size = ARRAY_SIZE(t4_reg_ranges); + buf_size = T4_REGMAP_SIZE; + } else { + reg_ranges = &t5_reg_ranges[0]; + arr_size = ARRAY_SIZE(t5_reg_ranges); + buf_size = T5_REGMAP_SIZE; + } regs->version = mk_adap_vers(ap); - memset(buf, 0, T4_REGMAP_SIZE); - for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2) + memset(buf, 0, buf_size); + for (i = 0; i < arr_size; i += 2) reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]); } @@ -1383,6 +2253,8 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps) else if (type == FW_PORT_TYPE_FIBER_XFI || type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP) v |= SUPPORTED_FIBRE; + else if (type == FW_PORT_TYPE_BP40_BA) + v |= SUPPORTED_40000baseSR4_Full; if (caps & FW_PORT_CAP_ANEG) v |= SUPPORTED_Autoneg; @@ -1399,6 +2271,8 @@ static unsigned int to_fw_linkcaps(unsigned int caps) v |= FW_PORT_CAP_SPEED_1G; if (caps & ADVERTISED_10000baseT_Full) v |= FW_PORT_CAP_SPEED_10G; + if (caps & ADVERTISED_40000baseSR4_Full) + v |= FW_PORT_CAP_SPEED_40G; return v; } @@ -1413,12 +2287,19 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) else if (p->port_type == FW_PORT_TYPE_FIBER_XFI || p->port_type == FW_PORT_TYPE_FIBER_XAUI) cmd->port = PORT_FIBRE; - else if (p->port_type == FW_PORT_TYPE_SFP) { - if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE || - p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE) + else if (p->port_type == FW_PORT_TYPE_SFP || + p->port_type == FW_PORT_TYPE_QSFP_10G || + p->port_type == FW_PORT_TYPE_QSFP) { + if (p->mod_type == FW_PORT_MOD_TYPE_LR || + p->mod_type == FW_PORT_MOD_TYPE_SR || + p->mod_type == FW_PORT_MOD_TYPE_ER || + p->mod_type == FW_PORT_MOD_TYPE_LRM) + cmd->port = PORT_FIBRE; + else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE || + p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE) cmd->port = PORT_DA; else - cmd->port = PORT_FIBRE; + cmd->port = PORT_OTHER; } else cmd->port = PORT_OTHER; @@ -1447,12 +2328,14 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) static unsigned int speed_to_caps(int speed) { - if (speed == SPEED_100) + if (speed == 100) return FW_PORT_CAP_SPEED_100M; - if (speed == SPEED_1000) + if (speed == 1000) return FW_PORT_CAP_SPEED_1G; - if (speed == SPEED_10000) + if (speed == 10000) return FW_PORT_CAP_SPEED_10G; + if (speed == 40000) + return FW_PORT_CAP_SPEED_40G; return 0; } @@ -1480,8 +2363,10 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) if (cmd->autoneg == AUTONEG_DISABLE) { cap = speed_to_caps(speed); - if (!(lc->supported & cap) || (speed == SPEED_1000) || - (speed == SPEED_10000)) + if (!(lc->supported & cap) || + (speed == 1000) || + (speed == 10000) || + (speed == 40000)) return -EINVAL; lc->requested_speed = cap; lc->advertising = 0; @@ -1618,8 +2503,7 @@ static unsigned int qtimer_val(const struct adapter *adap, } /** - * set_rxq_intr_params - set a queue's interrupt holdoff parameters - * @adap: the adapter + * set_rspq_intr_params - set a queue's interrupt holdoff parameters * @q: the Rx queue * @us: the hold-off time in us, or 0 to disable timer * @cnt: the hold-off packet count, or 0 to 
disable counter @@ -1627,9 +2511,11 @@ static unsigned int qtimer_val(const struct adapter *adap, * Sets an Rx queue's interrupt hold-off time and packet count. At least * one of the two needs to be enabled for the queue to generate interrupts. */ -static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q, - unsigned int us, unsigned int cnt) +static int set_rspq_intr_params(struct sge_rspq *q, + unsigned int us, unsigned int cnt) { + struct adapter *adap = q->adap; + if ((us | cnt) == 0) cnt = 1; @@ -1656,13 +2542,34 @@ static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q, return 0; } -static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +/** + * set_rx_intr_params - set a net devices's RX interrupt holdoff paramete! + * @dev: the network device + * @us: the hold-off time in us, or 0 to disable timer + * @cnt: the hold-off packet count, or 0 to disable counter + * + * Set the RX interrupt hold-off parameters for a network device. + */ +static int set_rx_intr_params(struct net_device *dev, + unsigned int us, unsigned int cnt) { - const struct port_info *pi = netdev_priv(dev); + int i, err; + struct port_info *pi = netdev_priv(dev); struct adapter *adap = pi->adapter; + struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset]; + + for (i = 0; i < pi->nqsets; i++, q++) { + err = set_rspq_intr_params(&q->rspq, us, cnt); + if (err) + return err; + } + return 0; +} - return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq, - c->rx_coalesce_usecs, c->rx_max_coalesced_frames); +static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +{ + return set_rx_intr_params(dev, c->rx_coalesce_usecs, + c->rx_max_coalesced_frames); } static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) @@ -1860,14 +2767,14 @@ static int cxgb_set_features(struct net_device *dev, netdev_features_t features) netdev_features_t changed = dev->features ^ features; int err; - if (!(changed & NETIF_F_HW_VLAN_RX)) + if (!(changed & NETIF_F_HW_VLAN_CTAG_RX)) return 0; err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, -1, -1, -1, - !!(features & NETIF_F_HW_VLAN_RX), true); + !!(features & NETIF_F_HW_VLAN_CTAG_RX), true); if (unlikely(err)) - dev->features = features ^ NETIF_F_HW_VLAN_RX; + dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX; return err; } @@ -1878,7 +2785,7 @@ static u32 get_rss_table_size(struct net_device *dev) return pi->rss_size; } -static int get_rss_table(struct net_device *dev, u32 *p) +static int get_rss_table(struct net_device *dev, u32 *p, u8 *key) { const struct port_info *pi = netdev_priv(dev); unsigned int n = pi->rss_size; @@ -1888,7 +2795,7 @@ static int get_rss_table(struct net_device *dev, u32 *p) return 0; } -static int set_rss_table(struct net_device *dev, const u32 *p) +static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key) { unsigned int i; struct port_info *pi = netdev_priv(dev); @@ -1990,26 +2897,19 @@ static const struct ethtool_ops cxgb_ethtool_ops = { .set_wol = set_wol, .get_rxnfc = get_rxnfc, .get_rxfh_indir_size = get_rss_table_size, - .get_rxfh_indir = get_rss_table, - .set_rxfh_indir = set_rss_table, + .get_rxfh = get_rss_table, + .set_rxfh = set_rss_table, .flash_device = set_flash, }; /* * debugfs support */ - -static int mem_open(struct inode *inode, struct file *file) -{ - file->private_data = inode->i_private; - return 0; -} - static ssize_t mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { loff_t pos = 
*ppos; - loff_t avail = file->f_path.dentry->d_inode->i_size; + loff_t avail = file_inode(file)->i_size; unsigned int mem = (uintptr_t)file->private_data & 3; struct adapter *adap = file->private_data - mem; @@ -2025,8 +2925,8 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count, int ret, ofst; __be32 data[16]; - if (mem == MEM_MC) - ret = t4_mc_read(adap, pos, data, NULL); + if ((mem == MEM_MC) || (mem == MEM_MC1)) + ret = t4_mc_read(adap, mem % MEM_MC, pos, data, NULL); else ret = t4_edc_read(adap, mem, pos, data, NULL); if (ret) @@ -2048,13 +2948,13 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count, static const struct file_operations mem_debugfs_fops = { .owner = THIS_MODULE, - .open = mem_open, + .open = simple_open, .read = mem_read, .llseek = default_llseek, }; -static void __devinit add_debugfs_mem(struct adapter *adap, const char *name, - unsigned int idx, unsigned int size_mb) +static void add_debugfs_mem(struct adapter *adap, const char *name, + unsigned int idx, unsigned int size_mb) { struct dentry *de; @@ -2064,21 +2964,40 @@ static void __devinit add_debugfs_mem(struct adapter *adap, const char *name, de->d_inode->i_size = size_mb << 20; } -static int __devinit setup_debugfs(struct adapter *adap) +static int setup_debugfs(struct adapter *adap) { int i; + u32 size; if (IS_ERR_OR_NULL(adap->debugfs_root)) return -1; i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE); - if (i & EDRAM0_ENABLE) - add_debugfs_mem(adap, "edc0", MEM_EDC0, 5); - if (i & EDRAM1_ENABLE) - add_debugfs_mem(adap, "edc1", MEM_EDC1, 5); - if (i & EXT_MEM_ENABLE) - add_debugfs_mem(adap, "mc", MEM_MC, - EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR))); + if (i & EDRAM0_ENABLE) { + size = t4_read_reg(adap, MA_EDRAM0_BAR); + add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size)); + } + if (i & EDRAM1_ENABLE) { + size = t4_read_reg(adap, MA_EDRAM1_BAR); + add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size)); + } + if (is_t4(adap->params.chip)) { + size = t4_read_reg(adap, MA_EXT_MEMORY_BAR); + if (i & EXT_MEM_ENABLE) + add_debugfs_mem(adap, "mc", MEM_MC, + EXT_MEM_SIZE_GET(size)); + } else { + if (i & EXT_MEM_ENABLE) { + size = t4_read_reg(adap, MA_EXT_MEMORY_BAR); + add_debugfs_mem(adap, "mc0", MEM_MC0, + EXT_MEM_SIZE_GET(size)); + } + if (i & EXT_MEM1_ENABLE) { + size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR); + add_debugfs_mem(adap, "mc1", MEM_MC1, + EXT_MEM_SIZE_GET(size)); + } + } if (adap->l2t) debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap, &t4_l2t_fops); @@ -2100,7 +3019,7 @@ int cxgb4_alloc_atid(struct tid_info *t, void *data) if (t->afree) { union aopen_entry *p = t->afree; - atid = p - t->atid_tab; + atid = (p - t->atid_tab) + t->atid_base; t->afree = p->next; p->data = data; t->atids_in_use++; @@ -2115,7 +3034,7 @@ EXPORT_SYMBOL(cxgb4_alloc_atid); */ void cxgb4_free_atid(struct tid_info *t, unsigned int atid) { - union aopen_entry *p = &t->atid_tab[atid]; + union aopen_entry *p = &t->atid_tab[atid - t->atid_base]; spin_lock_bh(&t->atid_lock); p->next = t->afree; @@ -2147,26 +3066,70 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data) if (stid >= 0) { t->stid_tab[stid].data = data; stid += t->stid_base; - t->stids_in_use++; + /* IPv6 requires max of 520 bits or 16 cells in TCAM + * This is equivalent to 4 TIDs. With CLIP enabled it + * needs 2 TIDs. 
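The asymmetric accounting above (one TID per IPv4 server, four per IPv6 server) matches the release path, where cxgb4_free_stid() returns an IPv6 entry with bitmap_release_region(..., 2), an order-2, four-bit region. A toy version of naturally aligned region allocation over a small bitmap (plain C, a simplified stand-in for the kernel's bitmap_find_free_region()):

#include <assert.h>
#include <stdint.h>

/* allocate a naturally aligned 2^order run of bits, -1 if none */
static int find_free_region(uint32_t *map, int nbits, int order)
{
	int step = 1 << order;
	uint32_t mask = step == 32 ? ~0u : (1u << step) - 1;

	for (int pos = 0; pos + step <= nbits; pos += step) {
		if (!(*map & (mask << pos))) {
			*map |= mask << pos;
			return pos;
		}
	}
	return -1;
}

int main(void)
{
	uint32_t map = 0;

	assert(find_free_region(&map, 32, 0) == 0);	/* IPv4: 1 TID */
	assert(find_free_region(&map, 32, 2) == 4);	/* IPv6: 4 TIDs */
	assert(map == 0xf1);		/* bit 0, then the aligned run 4..7 */
	return 0;
}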
+ */ + if (family == PF_INET) + t->stids_in_use++; + else + t->stids_in_use += 4; } spin_unlock_bh(&t->stid_lock); return stid; } EXPORT_SYMBOL(cxgb4_alloc_stid); -/* - * Release a server TID. +/* Allocate a server filter TID and set it to the supplied value. + */ +int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data) +{ + int stid; + + spin_lock_bh(&t->stid_lock); + if (family == PF_INET) { + stid = find_next_zero_bit(t->stid_bmap, + t->nstids + t->nsftids, t->nstids); + if (stid < (t->nstids + t->nsftids)) + __set_bit(stid, t->stid_bmap); + else + stid = -1; + } else { + stid = -1; + } + if (stid >= 0) { + t->stid_tab[stid].data = data; + stid -= t->nstids; + stid += t->sftid_base; + t->stids_in_use++; + } + spin_unlock_bh(&t->stid_lock); + return stid; +} +EXPORT_SYMBOL(cxgb4_alloc_sftid); + +/* Release a server TID. */ void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) { - stid -= t->stid_base; + /* Is it a server filter TID? */ + if (t->nsftids && (stid >= t->sftid_base)) { + stid -= t->sftid_base; + stid += t->nstids; + } else { + stid -= t->stid_base; + } + spin_lock_bh(&t->stid_lock); if (family == PF_INET) __clear_bit(stid, t->stid_bmap); else bitmap_release_region(t->stid_bmap, stid, 2); t->stid_tab[stid].data = NULL; - t->stids_in_use--; + if (family == PF_INET) + t->stids_in_use--; + else + t->stids_in_use -= 4; spin_unlock_bh(&t->stid_lock); } EXPORT_SYMBOL(cxgb4_free_stid); @@ -2201,7 +3164,7 @@ static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan, adap->tid_release_head = (void **)((uintptr_t)p | chan); if (!adap->tid_release_task_busy) { adap->tid_release_task_busy = true; - schedule_work(&adap->tid_release_task); + queue_work(workq, &adap->tid_release_task); } spin_unlock_bh(&adap->tid_release_lock); } @@ -2267,18 +3230,27 @@ EXPORT_SYMBOL(cxgb4_remove_tid); static int tid_init(struct tid_info *t) { size_t size; + unsigned int stid_bmap_size; unsigned int natids = t->natids; + struct adapter *adap = container_of(t, struct adapter, tids); - size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) + + stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids); + size = t->ntids * sizeof(*t->tid_tab) + + natids * sizeof(*t->atid_tab) + t->nstids * sizeof(*t->stid_tab) + - BITS_TO_LONGS(t->nstids) * sizeof(long); + t->nsftids * sizeof(*t->stid_tab) + + stid_bmap_size * sizeof(long) + + t->nftids * sizeof(*t->ftid_tab) + + t->nsftids * sizeof(*t->ftid_tab); + t->tid_tab = t4_alloc_mem(size); if (!t->tid_tab) return -ENOMEM; t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids]; t->stid_tab = (struct serv_entry *)&t->atid_tab[natids]; - t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids]; + t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids]; + t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size]; spin_lock_init(&t->stid_lock); spin_lock_init(&t->atid_lock); @@ -2293,10 +3265,47 @@ static int tid_init(struct tid_info *t) t->atid_tab[natids - 1].next = &t->atid_tab[natids]; t->afree = t->atid_tab; } - bitmap_zero(t->stid_bmap, t->nstids); + bitmap_zero(t->stid_bmap, t->nstids + t->nsftids); + /* Reserve stid 0 for T4/T5 adapters */ + if (!t->stid_base && + (is_t4(adap->params.chip) || is_t5(adap->params.chip))) + __set_bit(0, t->stid_bmap); + return 0; } +static int cxgb4_clip_get(const struct net_device *dev, + const struct in6_addr *lip) +{ + struct adapter *adap; + struct fw_clip_cmd c; + + adap = netdev2adap(dev); + memset(&c, 0, sizeof(c)); + c.op_to_write = 
htonl(FW_CMD_OP(FW_CLIP_CMD) | + FW_CMD_REQUEST | FW_CMD_WRITE); + c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c)); + c.ip_hi = *(__be64 *)(lip->s6_addr); + c.ip_lo = *(__be64 *)(lip->s6_addr + 8); + return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false); +} + +static int cxgb4_clip_release(const struct net_device *dev, + const struct in6_addr *lip) +{ + struct adapter *adap; + struct fw_clip_cmd c; + + adap = netdev2adap(dev); + memset(&c, 0, sizeof(c)); + c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) | + FW_CMD_REQUEST | FW_CMD_READ); + c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c)); + c.ip_hi = *(__be64 *)(lip->s6_addr); + c.ip_lo = *(__be64 *)(lip->s6_addr + 8); + return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false); +} + /** * cxgb4_create_server - create an IP server * @dev: the device @@ -2309,12 +3318,14 @@ static int tid_init(struct tid_info *t) * Returns <0 on error and one of the %NET_XMIT_* values on success. */ int cxgb4_create_server(const struct net_device *dev, unsigned int stid, - __be32 sip, __be16 sport, unsigned int queue) + __be32 sip, __be16 sport, __be16 vlan, + unsigned int queue) { unsigned int chan; struct sk_buff *skb; struct adapter *adap; struct cpl_pass_open_req *req; + int ret; skb = alloc_skb(sizeof(*req), GFP_KERNEL); if (!skb) @@ -2332,10 +3343,78 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid, req->opt0 = cpu_to_be64(TX_CHAN(chan)); req->opt1 = cpu_to_be64(CONN_POLICY_ASK | SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); - return t4_mgmt_tx(adap, skb); + ret = t4_mgmt_tx(adap, skb); + return net_xmit_eval(ret); } EXPORT_SYMBOL(cxgb4_create_server); +/* cxgb4_create_server6 - create an IPv6 server + * @dev: the device + * @stid: the server TID + * @sip: local IPv6 address to bind server to + * @sport: the server's TCP port + * @queue: queue to direct messages from this server to + * + * Create an IPv6 server for the given port and address. + * Returns <0 on error and one of the %NET_XMIT_* values on success. 
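cxgb4_clip_get() and cxgb4_clip_release() above (and cxgb4_create_server6() below) pass a 128-bit IPv6 address to the firmware as two big-endian 64-bit halves lifted straight out of s6_addr; no byte swapping is needed because s6_addr is already in network order. A standalone illustration (plain C, using memcpy in place of the driver's unaligned casts):

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	/* 2001:db8::1, already in network byte order like s6_addr */
	uint8_t s6_addr[16] = { 0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0,
				0, 0, 0, 0, 0, 0, 0, 0x01 };
	uint64_t ip_hi, ip_lo;		/* big-endian, as sent to the FW */
	uint8_t back[16];

	memcpy(&ip_hi, s6_addr, 8);	/* *(__be64 *)(lip->s6_addr) */
	memcpy(&ip_lo, s6_addr + 8, 8);	/* *(__be64 *)(lip->s6_addr + 8) */

	/* the two halves carry the full address, byte for byte */
	memcpy(back, &ip_hi, 8);
	memcpy(back + 8, &ip_lo, 8);
	assert(memcmp(back, s6_addr, 16) == 0);
	return 0;
}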
+ */ +int cxgb4_create_server6(const struct net_device *dev, unsigned int stid, + const struct in6_addr *sip, __be16 sport, + unsigned int queue) +{ + unsigned int chan; + struct sk_buff *skb; + struct adapter *adap; + struct cpl_pass_open_req6 *req; + int ret; + + skb = alloc_skb(sizeof(*req), GFP_KERNEL); + if (!skb) + return -ENOMEM; + + adap = netdev2adap(dev); + req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req)); + INIT_TP_WR(req, 0); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid)); + req->local_port = sport; + req->peer_port = htons(0); + req->local_ip_hi = *(__be64 *)(sip->s6_addr); + req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8); + req->peer_ip_hi = cpu_to_be64(0); + req->peer_ip_lo = cpu_to_be64(0); + chan = rxq_to_chan(&adap->sge, queue); + req->opt0 = cpu_to_be64(TX_CHAN(chan)); + req->opt1 = cpu_to_be64(CONN_POLICY_ASK | + SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); + ret = t4_mgmt_tx(adap, skb); + return net_xmit_eval(ret); +} +EXPORT_SYMBOL(cxgb4_create_server6); + +int cxgb4_remove_server(const struct net_device *dev, unsigned int stid, + unsigned int queue, bool ipv6) +{ + struct sk_buff *skb; + struct adapter *adap; + struct cpl_close_listsvr_req *req; + int ret; + + adap = netdev2adap(dev); + + skb = alloc_skb(sizeof(*req), GFP_KERNEL); + if (!skb) + return -ENOMEM; + + req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req)); + INIT_TP_WR(req, 0); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid)); + req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) : + LISTSVR_IPV6(0)) | QUEUENO(queue)); + ret = t4_mgmt_tx(adap, skb); + return net_xmit_eval(ret); +} +EXPORT_SYMBOL(cxgb4_remove_server); + /** * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU * @mtus: the HW MTU table @@ -2360,6 +3439,77 @@ unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu, EXPORT_SYMBOL(cxgb4_best_mtu); /** + * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned + * @mtus: the HW MTU table + * @header_size: Header Size + * @data_size_max: maximum Data Segment Size + * @data_size_align: desired Data Segment Size Alignment (2^N) + * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL) + * + * Similar to cxgb4_best_mtu() but instead of searching the Hardware + * MTU Table based solely on a Maximum MTU parameter, we break that + * parameter up into a Header Size and Maximum Data Segment Size, and + * provide a desired Data Segment Size Alignment. If we find an MTU in + * the Hardware MTU Table which will result in a Data Segment Size with + * the requested alignment _and_ that MTU isn't "too far" from the + * closest MTU, then we'll return that rather than the closest MTU. + */ +unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus, + unsigned short header_size, + unsigned short data_size_max, + unsigned short data_size_align, + unsigned int *mtu_idxp) +{ + unsigned short max_mtu = header_size + data_size_max; + unsigned short data_size_align_mask = data_size_align - 1; + int mtu_idx, aligned_mtu_idx; + + /* Scan the MTU Table till we find an MTU which is larger than our + * Maximum MTU or we reach the end of the table. Along the way, + * record the last MTU found, if any, which will result in a Data + * Segment Length matching the requested alignment. 
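Before the scan loop that follows, a worked, user-space rendering of the whole cxgb4_best_aligned_mtu() search may help; the table values, header size, and alignment below are made-up inputs chosen to exercise both the "aligned and close" and the "aligned but too far" outcomes:

#include <assert.h>

#define NMTUS 5

/* user-space rendering of the scan in cxgb4_best_aligned_mtu() */
static unsigned int best_aligned_mtu(const unsigned short *mtus,
				     unsigned short header_size,
				     unsigned short data_size_max,
				     unsigned short data_size_align)
{
	unsigned short max_mtu = header_size + data_size_max;
	unsigned short mask = data_size_align - 1;
	int mtu_idx, aligned_mtu_idx = -1;

	for (mtu_idx = 0; mtu_idx < NMTUS; mtu_idx++) {
		unsigned short data_size = mtus[mtu_idx] - header_size;

		if ((data_size & mask) == 0)	/* remember aligned entries */
			aligned_mtu_idx = mtu_idx;
		if (mtu_idx + 1 < NMTUS && mtus[mtu_idx + 1] > max_mtu)
			break;
	}
	if (mtu_idx == NMTUS)			/* ran off the end */
		mtu_idx--;
	if (aligned_mtu_idx >= 0 && mtu_idx - aligned_mtu_idx <= 1)
		mtu_idx = aligned_mtu_idx;	/* aligned and "not far" */
	return mtus[mtu_idx];
}

int main(void)
{
	/* 40-byte header, 512-byte alignment, 2008-byte max payload */
	const unsigned short close[NMTUS] = { 576, 1064, 1576, 2048, 9000 };
	const unsigned short far[NMTUS]   = { 576, 1064, 1500, 2048, 9000 };

	/* 1576 - 40 = 1536 is aligned and one step from the best MTU */
	assert(best_aligned_mtu(close, 40, 2008, 512) == 1576);
	/* here the only aligned entry (1064) is too far below 2048 */
	assert(best_aligned_mtu(far, 40, 2008, 512) == 2048);
	return 0;
}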
+ */ + for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) { + unsigned short data_size = mtus[mtu_idx] - header_size; + + /* If this MTU minus the Header Size would result in a + * Data Segment Size of the desired alignment, remember it. + */ + if ((data_size & data_size_align_mask) == 0) + aligned_mtu_idx = mtu_idx; + + /* If we're not at the end of the Hardware MTU Table and the + * next element is larger than our Maximum MTU, drop out of + * the loop. + */ + if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu) + break; + } + + /* If we fell out of the loop because we ran to the end of the table, + * then we just have to use the last [largest] entry. + */ + if (mtu_idx == NMTUS) + mtu_idx--; + + /* If we found an MTU which resulted in the requested Data Segment + * Length alignment and that's "not far" from the largest MTU which is + * less than or equal to the maximum MTU, then use that. + */ + if (aligned_mtu_idx >= 0 && + mtu_idx - aligned_mtu_idx <= 1) + mtu_idx = aligned_mtu_idx; + + /* If the caller has passed in an MTU Index pointer, pass the + * MTU Index back. Return the MTU value. + */ + if (mtu_idxp) + *mtu_idxp = mtu_idx; + return mtus[mtu_idx]; +} +EXPORT_SYMBOL(cxgb4_best_aligned_mtu); + +/** * cxgb4_port_chan - get the HW channel of a port * @dev: the net device for the port * @@ -2371,6 +3521,24 @@ unsigned int cxgb4_port_chan(const struct net_device *dev) } EXPORT_SYMBOL(cxgb4_port_chan); +unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo) +{ + struct adapter *adap = netdev2adap(dev); + u32 v1, v2, lp_count, hp_count; + + v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); + v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2); + if (is_t4(adap->params.chip)) { + lp_count = G_LP_COUNT(v1); + hp_count = G_HP_COUNT(v1); + } else { + lp_count = G_LP_COUNT_T5(v1); + hp_count = G_HP_COUNT_T5(v2); + } + return lpfifo ? 
lp_count : hp_count; +} +EXPORT_SYMBOL(cxgb4_dbfifo_count); + /** * cxgb4_port_viid - get the VI id of a port * @dev: the net device for the port @@ -2418,6 +3586,77 @@ void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask, } EXPORT_SYMBOL(cxgb4_iscsi_init); +int cxgb4_flush_eq_cache(struct net_device *dev) +{ + struct adapter *adap = netdev2adap(dev); + int ret; + + ret = t4_fwaddrspace_write(adap, adap->mbox, + 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000); + return ret; +} +EXPORT_SYMBOL(cxgb4_flush_eq_cache); + +static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx) +{ + u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8; + __be64 indices; + int ret; + + ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8); + if (!ret) { + *cidx = (be64_to_cpu(indices) >> 25) & 0xffff; + *pidx = (be64_to_cpu(indices) >> 9) & 0xffff; + } + return ret; +} + +int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, + u16 size) +{ + struct adapter *adap = netdev2adap(dev); + u16 hw_pidx, hw_cidx; + int ret; + + ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx); + if (ret) + goto out; + + if (pidx != hw_pidx) { + u16 delta; + + if (pidx >= hw_pidx) + delta = pidx - hw_pidx; + else + delta = size - hw_pidx + pidx; + wmb(); + t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), + QID(qid) | PIDX(delta)); + } +out: + return ret; +} +EXPORT_SYMBOL(cxgb4_sync_txq_pidx); + +void cxgb4_disable_db_coalescing(struct net_device *dev) +{ + struct adapter *adap; + + adap = netdev2adap(dev); + t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, + F_NOCOALESCE); +} +EXPORT_SYMBOL(cxgb4_disable_db_coalescing); + +void cxgb4_enable_db_coalescing(struct net_device *dev) +{ + struct adapter *adap; + + adap = netdev2adap(dev); + t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0); +} +EXPORT_SYMBOL(cxgb4_enable_db_coalescing); + static struct pci_driver cxgb4_driver; static void check_neigh_update(struct neighbour *neigh) @@ -2451,10 +3690,213 @@ static struct notifier_block cxgb4_netevent_nb = { .notifier_call = netevent_cb }; +static void drain_db_fifo(struct adapter *adap, int usecs) +{ + u32 v1, v2, lp_count, hp_count; + + do { + v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); + v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2); + if (is_t4(adap->params.chip)) { + lp_count = G_LP_COUNT(v1); + hp_count = G_HP_COUNT(v1); + } else { + lp_count = G_LP_COUNT_T5(v1); + hp_count = G_HP_COUNT_T5(v2); + } + + if (lp_count == 0 && hp_count == 0) + break; + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(usecs_to_jiffies(usecs)); + } while (1); +} + +static void disable_txq_db(struct sge_txq *q) +{ + unsigned long flags; + + spin_lock_irqsave(&q->db_lock, flags); + q->db_disabled = 1; + spin_unlock_irqrestore(&q->db_lock, flags); +} + +static void enable_txq_db(struct adapter *adap, struct sge_txq *q) +{ + spin_lock_irq(&q->db_lock); + if (q->db_pidx_inc) { + /* Make sure that all writes to the TX descriptors + * are committed before we tell HW about them. 
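(The producer-index resync in cxgb4_sync_txq_pidx() above must account for ring wrap-around when the host index has passed the hardware index. A minimal standalone model of just that delta computation, illustrative only:)

#include <assert.h>

/* Number of ring entries the hardware producer index is behind the
 * host's, for a circular queue of 'size' entries.
 */
static unsigned short pidx_delta(unsigned short pidx,
				 unsigned short hw_pidx,
				 unsigned short size)
{
	return pidx >= hw_pidx ? pidx - hw_pidx
			       : size - hw_pidx + pidx;
}

int main(void)
{
	assert(pidx_delta(10, 4, 1024) == 6);	/* no wrap */
	assert(pidx_delta(2, 1020, 1024) == 6);	/* host wrapped past HW */
	return 0;
}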
+ */ + wmb(); + t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), + QID(q->cntxt_id) | PIDX(q->db_pidx_inc)); + q->db_pidx_inc = 0; + } + q->db_disabled = 0; + spin_unlock_irq(&q->db_lock); +} + +static void disable_dbs(struct adapter *adap) +{ + int i; + + for_each_ethrxq(&adap->sge, i) + disable_txq_db(&adap->sge.ethtxq[i].q); + for_each_ofldrxq(&adap->sge, i) + disable_txq_db(&adap->sge.ofldtxq[i].q); + for_each_port(adap, i) + disable_txq_db(&adap->sge.ctrlq[i].q); +} + +static void enable_dbs(struct adapter *adap) +{ + int i; + + for_each_ethrxq(&adap->sge, i) + enable_txq_db(adap, &adap->sge.ethtxq[i].q); + for_each_ofldrxq(&adap->sge, i) + enable_txq_db(adap, &adap->sge.ofldtxq[i].q); + for_each_port(adap, i) + enable_txq_db(adap, &adap->sge.ctrlq[i].q); +} + +static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd) +{ + if (adap->uld_handle[CXGB4_ULD_RDMA]) + ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA], + cmd); +} + +static void process_db_full(struct work_struct *work) +{ + struct adapter *adap; + + adap = container_of(work, struct adapter, db_full_task); + + drain_db_fifo(adap, dbfifo_drain_delay); + enable_dbs(adap); + notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); + t4_set_reg_field(adap, SGE_INT_ENABLE3, + DBFIFO_HP_INT | DBFIFO_LP_INT, + DBFIFO_HP_INT | DBFIFO_LP_INT); +} + +static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q) +{ + u16 hw_pidx, hw_cidx; + int ret; + + spin_lock_irq(&q->db_lock); + ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx); + if (ret) + goto out; + if (q->db_pidx != hw_pidx) { + u16 delta; + + if (q->db_pidx >= hw_pidx) + delta = q->db_pidx - hw_pidx; + else + delta = q->size - hw_pidx + q->db_pidx; + wmb(); + t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), + QID(q->cntxt_id) | PIDX(delta)); + } +out: + q->db_disabled = 0; + q->db_pidx_inc = 0; + spin_unlock_irq(&q->db_lock); + if (ret) + CH_WARN(adap, "DB drop recovery failed.\n"); +} +static void recover_all_queues(struct adapter *adap) +{ + int i; + + for_each_ethrxq(&adap->sge, i) + sync_txq_pidx(adap, &adap->sge.ethtxq[i].q); + for_each_ofldrxq(&adap->sge, i) + sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q); + for_each_port(adap, i) + sync_txq_pidx(adap, &adap->sge.ctrlq[i].q); +} + +static void process_db_drop(struct work_struct *work) +{ + struct adapter *adap; + + adap = container_of(work, struct adapter, db_drop_task); + + if (is_t4(adap->params.chip)) { + drain_db_fifo(adap, dbfifo_drain_delay); + notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP); + drain_db_fifo(adap, dbfifo_drain_delay); + recover_all_queues(adap); + drain_db_fifo(adap, dbfifo_drain_delay); + enable_dbs(adap); + notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); + } else { + u32 dropped_db = t4_read_reg(adap, 0x010ac); + u16 qid = (dropped_db >> 15) & 0x1ffff; + u16 pidx_inc = dropped_db & 0x1fff; + unsigned int s_qpp; + unsigned short udb_density; + unsigned long qpshift; + int page; + u32 udb; + + dev_warn(adap->pdev_dev, + "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n", + dropped_db, qid, + (dropped_db >> 14) & 1, + (dropped_db >> 13) & 1, + pidx_inc); + + drain_db_fifo(adap, 1); + + s_qpp = QUEUESPERPAGEPF1 * adap->fn; + udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap, + SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp); + qpshift = PAGE_SHIFT - ilog2(udb_density); + udb = qid << qpshift; + udb &= PAGE_MASK; + page = udb / PAGE_SIZE; + udb += (qid - (page * udb_density)) * 128; + + writel(PIDX(pidx_inc), adap->bar2 + udb + 8); + + /* Re-enable BAR2 WC */ + 
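(The T5 recovery path above computes a BAR2 user-doorbell offset by packing udb_density egress queues per page, 128 bytes apart. A standalone sketch of that arithmetic, assuming 4 KB pages:)

#include <stdio.h>

#define UDB_PAGE_SIZE	4096u	/* assumed host page size */

/* BAR2 offset of the user doorbell for 'qid' when 'udb_density'
 * egress queues share one page, 128 bytes apart.
 */
static unsigned int udb_offset(unsigned int qid, unsigned int udb_density)
{
	unsigned int qpshift = 12 - __builtin_ctz(udb_density); /* ilog2 */
	unsigned int udb = (qid << qpshift) & ~(UDB_PAGE_SIZE - 1);
	unsigned int page = udb / UDB_PAGE_SIZE;

	return udb + (qid - page * udb_density) * 128;
}

int main(void)
{
	/* qid 35 with 16 queues/page: page 2 (qids 32..47), slot 3,
	 * so 2 * 4096 + 3 * 128 = 8576.
	 */
	printf("%u\n", udb_offset(35, 16));
	return 0;
}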
t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15); + } + + t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0); +} + +void t4_db_full(struct adapter *adap) +{ + if (is_t4(adap->params.chip)) { + disable_dbs(adap); + notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); + t4_set_reg_field(adap, SGE_INT_ENABLE3, + DBFIFO_HP_INT | DBFIFO_LP_INT, 0); + queue_work(workq, &adap->db_full_task); + } +} + +void t4_db_dropped(struct adapter *adap) +{ + if (is_t4(adap->params.chip)) { + disable_dbs(adap); + notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); + } + queue_work(workq, &adap->db_drop_task); +} + static void uld_attach(struct adapter *adap, unsigned int uld) { void *handle; struct cxgb4_lld_info lli; + unsigned short i; lli.pdev = adap->pdev; lli.l2t = adap->l2t; @@ -2464,7 +3906,9 @@ static void uld_attach(struct adapter *adap, unsigned int uld) lli.mtus = adap->params.mtus; if (uld == CXGB4_ULD_RDMA) { lli.rxq_ids = adap->sge.rdma_rxq; + lli.ciq_ids = adap->sge.rdma_ciq; lli.nrxq = adap->sge.rdmaqs; + lli.nciq = adap->sge.rdmaciqs; } else if (uld == CXGB4_ULD_ISCSI) { lli.rxq_ids = adap->sge.ofld_rxq; lli.nrxq = adap->sge.ofldqsets; @@ -2473,7 +3917,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld) lli.nchan = adap->params.nports; lli.nports = adap->params.nports; lli.wr_cred = adap->params.ofldq_wr_cred; - lli.adapter_type = adap->params.rev; + lli.adapter_type = adap->params.chip; lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2)); lli.udb_density = 1 << QUEUESPERPAGEPF0_GET( t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >> @@ -2481,9 +3925,17 @@ static void uld_attach(struct adapter *adap, unsigned int uld) lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >> (adap->fn * 4)); + lli.filt_mode = adap->params.tp.vlan_pri_map; + /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ + for (i = 0; i < NCHAN; i++) + lli.tx_modq[i] = i; lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS); lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL); lli.fw_vers = adap->params.fw_vers; + lli.dbfifo_int_thresh = dbfifo_int_thresh; + lli.sge_pktshift = adap->sge.pktshift; + lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN; + lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl; handle = ulds[uld].add(&lli); if (IS_ERR(handle)) { @@ -2508,6 +3960,10 @@ static void attach_ulds(struct adapter *adap) { unsigned int i; + spin_lock(&adap_rcu_lock); + list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list); + spin_unlock(&adap_rcu_lock); + mutex_lock(&uld_mutex); list_add_tail(&adap->list_node, &adapter_list); for (i = 0; i < CXGB4_ULD_MAX; i++) @@ -2533,6 +3989,10 @@ static void detach_ulds(struct adapter *adap) netevent_registered = false; } mutex_unlock(&uld_mutex); + + spin_lock(&adap_rcu_lock); + list_del_rcu(&adap->rcu_node); + spin_unlock(&adap_rcu_lock); } static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state) @@ -2596,6 +4056,166 @@ int cxgb4_unregister_uld(enum cxgb4_uld type) } EXPORT_SYMBOL(cxgb4_unregister_uld); +/* Check whether the netdev on which the event occurred belongs to us. + * Returns true if it does, false otherwise. + * Called with rcu_read_lock() held.
+ */ +static bool cxgb4_netdev(const struct net_device *netdev) +{ + struct adapter *adap; + int i; + + list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node) + for (i = 0; i < MAX_NPORTS; i++) + if (adap->port[i] == netdev) + return true; + return false; +} + +static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa, + unsigned long event) +{ + int ret = NOTIFY_DONE; + + rcu_read_lock(); + if (cxgb4_netdev(event_dev)) { + switch (event) { + case NETDEV_UP: + ret = cxgb4_clip_get(event_dev, + (const struct in6_addr *)ifa->addr.s6_addr); + if (ret < 0) { + rcu_read_unlock(); + return ret; + } + ret = NOTIFY_OK; + break; + case NETDEV_DOWN: + cxgb4_clip_release(event_dev, + (const struct in6_addr *)ifa->addr.s6_addr); + ret = NOTIFY_OK; + break; + default: + break; + } + } + rcu_read_unlock(); + return ret; +} + +static int cxgb4_inet6addr_handler(struct notifier_block *this, + unsigned long event, void *data) +{ + struct inet6_ifaddr *ifa = data; + struct net_device *event_dev; + int ret = NOTIFY_DONE; + struct bonding *bond = netdev_priv(ifa->idev->dev); + struct list_head *iter; + struct slave *slave; + struct pci_dev *first_pdev = NULL; + + if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) { + event_dev = vlan_dev_real_dev(ifa->idev->dev); + ret = clip_add(event_dev, ifa, event); + } else if (ifa->idev->dev->flags & IFF_MASTER) { + /* It is possible for two different adapters to be bonded in + * one bond. We need to find each such adapter and add the + * CLIP entry to it exactly once. + */ + read_lock(&bond->lock); + bond_for_each_slave(bond, slave, iter) { + if (!first_pdev) { + ret = clip_add(slave->dev, ifa, event); + /* Only initialize first_pdev if clip_add + * succeeds, since success means the device + * is ours + */ + if (ret == NOTIFY_OK) + first_pdev = to_pci_dev( + slave->dev->dev.parent); + } else if (first_pdev != + to_pci_dev(slave->dev->dev.parent)) + ret = clip_add(slave->dev, ifa, event); + } + read_unlock(&bond->lock); + } else + ret = clip_add(ifa->idev->dev, ifa, event); + + return ret; +} + +static struct notifier_block cxgb4_inet6addr_notifier = { + .notifier_call = cxgb4_inet6addr_handler +}; + +/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with + * a physical device. + * The physical device reference is needed to send the actual CLIP command.
+ */ +static int update_dev_clip(struct net_device *root_dev, struct net_device *dev) +{ + struct inet6_dev *idev = NULL; + struct inet6_ifaddr *ifa; + int ret = 0; + + idev = __in6_dev_get(root_dev); + if (!idev) + return ret; + + read_lock_bh(&idev->lock); + list_for_each_entry(ifa, &idev->addr_list, if_list) { + ret = cxgb4_clip_get(dev, + (const struct in6_addr *)ifa->addr.s6_addr); + if (ret < 0) + break; + } + read_unlock_bh(&idev->lock); + + return ret; +} + +static int update_root_dev_clip(struct net_device *dev) +{ + struct net_device *root_dev = NULL; + int i, ret = 0; + + /* First populate the real net device's IPv6 addresses */ + ret = update_dev_clip(dev, dev); + if (ret) + return ret; + + /* Parse all bond and vlan devices layered on top of the physical dev */ + for (i = 0; i < VLAN_N_VID; i++) { + root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i); + if (!root_dev) + continue; + + ret = update_dev_clip(root_dev, dev); + if (ret) + break; + } + return ret; +} + +static void update_clip(const struct adapter *adap) +{ + int i; + struct net_device *dev; + int ret; + + rcu_read_lock(); + + for (i = 0; i < MAX_NPORTS; i++) { + dev = adap->port[i]; + ret = 0; + + if (dev) + ret = update_root_dev_clip(dev); + + if (ret < 0) + break; + } + rcu_read_unlock(); +} + /** * cxgb_up - enable the adapter * @adap: adapter being enabled @@ -2641,6 +4261,7 @@ static int cxgb_up(struct adapter *adap) t4_intr_enable(adap); adap->flags |= FULL_INIT_DONE; notify_ulds(adap, CXGB4_STATE_UP); + update_clip(adap); out: return err; irq_err: @@ -2654,6 +4275,8 @@ static void cxgb_down(struct adapter *adapter) { t4_intr_disable(adapter); cancel_work_sync(&adapter->tid_release_task); + cancel_work_sync(&adapter->db_full_task); + cancel_work_sync(&adapter->db_drop_task); adapter->tid_release_task_busy = false; adapter->tid_release_head = NULL; @@ -2701,6 +4324,131 @@ static int cxgb_close(struct net_device *dev) return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false); } +/* Return an error number if the indicated filter isn't writable ... + */ +static int writable_filter(struct filter_entry *f) +{ + if (f->locked) + return -EPERM; + if (f->pending) + return -EBUSY; + + return 0; +} + +/* Delete the filter at the specified index (if valid). The checks for all + * the common problems with doing this like the filter being locked, currently + * pending in another operation, etc. + */ +static int delete_filter(struct adapter *adapter, unsigned int fidx) +{ + struct filter_entry *f; + int ret; + + if (fidx >= adapter->tids.nftids + adapter->tids.nsftids) + return -EINVAL; + + f = &adapter->tids.ftid_tab[fidx]; + ret = writable_filter(f); + if (ret) + return ret; + if (f->valid) + return del_filter_wr(adapter, fidx); + + return 0; +} + +int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid, + __be32 sip, __be16 sport, __be16 vlan, + unsigned int queue, unsigned char port, unsigned char mask) +{ + int ret; + struct filter_entry *f; + struct adapter *adap; + int i; + u8 *val; + + adap = netdev2adap(dev); + + /* Adjust stid to correct filter index */ + stid -= adap->tids.sftid_base; + stid += adap->tids.nftids; + + /* Check to make sure the filter requested is writable ... + */ + f = &adap->tids.ftid_tab[stid]; + ret = writable_filter(f); + if (ret) + return ret; + + /* Clear out any old resources being used by the filter before + * we start constructing the new filter. 
+ */ + if (f->valid) + clear_filter(adap, f); + + /* Clear out filter specifications */ + memset(&f->fs, 0, sizeof(struct ch_filter_specification)); + f->fs.val.lport = cpu_to_be16(sport); + f->fs.mask.lport = ~0; + val = (u8 *)&sip; + if ((val[0] | val[1] | val[2] | val[3]) != 0) { + for (i = 0; i < 4; i++) { + f->fs.val.lip[i] = val[i]; + f->fs.mask.lip[i] = ~0; + } + if (adap->params.tp.vlan_pri_map & F_PORT) { + f->fs.val.iport = port; + f->fs.mask.iport = mask; + } + } + + if (adap->params.tp.vlan_pri_map & F_PROTOCOL) { + f->fs.val.proto = IPPROTO_TCP; + f->fs.mask.proto = ~0; + } + + f->fs.dirsteer = 1; + f->fs.iq = queue; + /* Mark filter as locked */ + f->locked = 1; + f->fs.rpttid = 1; + + ret = set_filter_wr(adap, stid); + if (ret) { + clear_filter(adap, f); + return ret; + } + + return 0; +} +EXPORT_SYMBOL(cxgb4_create_server_filter); + +int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid, + unsigned int queue, bool ipv6) +{ + int ret; + struct filter_entry *f; + struct adapter *adap; + + adap = netdev2adap(dev); + + /* Adjust stid to correct filter index */ + stid -= adap->tids.sftid_base; + stid += adap->tids.nftids; + + f = &adap->tids.ftid_tab[stid]; + /* Unlock the filter */ + f->locked = 0; + + ret = delete_filter(adap, stid); + if (ret) + return ret; + + return 0; +} +EXPORT_SYMBOL(cxgb4_remove_server_filter); + static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev, struct rtnl_link_stats64 *ns) { @@ -2708,7 +4456,15 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev, struct port_info *p = netdev_priv(dev); struct adapter *adapter = p->adapter; + /* Block retrieving statistics during EEH error + * recovery. Otherwise, the recovery might fail + * and the PCI device will be removed permanently + */ spin_lock(&adapter->stats_lock); + if (!netif_device_present(dev)) { + spin_unlock(&adapter->stats_lock); + return ns; + } t4_get_port_stats(adapter, p->tx_chan, &stats); spin_unlock(&adapter->stats_lock); @@ -2809,7 +4565,7 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p) struct port_info *pi = netdev_priv(dev); if (!is_valid_ether_addr(addr->sa_data)) - return -EINVAL; + return -EADDRNOTAVAIL; ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid, pi->xact_addr_filt, addr->sa_data, true, true); @@ -2863,18 +4619,32 @@ void t4_fatal_err(struct adapter *adap) static void setup_memwin(struct adapter *adap) { - u32 bar0; + u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base; bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */ + if (is_t4(adap->params.chip)) { + mem_win0_base = bar0 + MEMWIN0_BASE; + mem_win1_base = bar0 + MEMWIN1_BASE; + mem_win2_base = bar0 + MEMWIN2_BASE; + } else { + /* For T5, only relative offset inside the PCIe BAR is passed */ + mem_win0_base = MEMWIN0_BASE; + mem_win1_base = MEMWIN1_BASE_T5; + mem_win2_base = MEMWIN2_BASE_T5; + } t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0), - (bar0 + MEMWIN0_BASE) | BIR(0) | + mem_win0_base | BIR(0) | WINDOW(ilog2(MEMWIN0_APERTURE) - 10)); t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1), - (bar0 + MEMWIN1_BASE) | BIR(0) | + mem_win1_base | BIR(0) | WINDOW(ilog2(MEMWIN1_APERTURE) - 10)); t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2), - (bar0 + MEMWIN2_BASE) | BIR(0) | + mem_win2_base | BIR(0) | WINDOW(ilog2(MEMWIN2_APERTURE) - 10)); +} + +static void setup_memwin_rdma(struct adapter *adap) +{ if (adap->vres.ocq.size) { unsigned int start, sz_kb; @@ 
-2901,7 +4671,7 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) memset(c, 0, sizeof(*c)); c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | FW_CMD_REQUEST | FW_CMD_READ); - c->retval_len16 = htonl(FW_LEN16(*c)); + c->cfvalid_to_len16 = htonl(FW_LEN16(*c)); ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c); if (ret < 0) return ret; @@ -2943,6 +4713,34 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) v = t4_read_reg(adap, TP_PIO_DATA); t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR); + /* first 4 Tx modulation queues point to consecutive Tx channels */ + adap->params.tp.tx_modq_map = 0xE4; + t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP, + V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map)); + + /* associate each Tx modulation queue with consecutive Tx channels */ + v = 0x84218421; + t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, + &v, 1, A_TP_TX_SCHED_HDR); + t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, + &v, 1, A_TP_TX_SCHED_FIFO); + t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, + &v, 1, A_TP_TX_SCHED_PCMD); + +#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */ + if (is_offload(adap)) { + t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, + V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | + V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | + V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | + V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT)); + t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT, + V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | + V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | + V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | + V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT)); + } + /* get basic stuff going */ return t4_early_init(adap, adap->fn); } @@ -2954,6 +4752,543 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) /* * Phase 0 of initialization: contact FW, obtain config, perform basic init. + * + * If the firmware we're dealing with has Configuration File support, then + * we use that to perform all configuration + */ + +/* + * Tweak configuration based on module parameters, etc. Most of these have + * defaults assigned to them by Firmware Configuration Files (if we're using + * them) but need to be explicitly set if we're using hard-coded + * initialization. But even in the case of using Firmware Configuration + * Files, we'd like to expose the ability to change these via module + * parameters so these are essentially common tweaks/settings for + * Configuration Files and hard-coded initialization ... + */ +static int adap_init0_tweaks(struct adapter *adapter) +{ + /* + * Fix up various Host-Dependent Parameters like Page Size, Cache + * Line Size, etc. The firmware default is for a 4KB Page Size and + * 64B Cache Line Size ... + */ + t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES); + + /* + * Process module parameters which affect early initialization. + */ + if (rx_dma_offset != 2 && rx_dma_offset != 0) { + dev_err(&adapter->pdev->dev, + "Ignoring illegal rx_dma_offset=%d, using 2\n", + rx_dma_offset); + rx_dma_offset = 2; + } + t4_set_reg_field(adapter, SGE_CONTROL, + PKTSHIFT_MASK, + PKTSHIFT(rx_dma_offset)); + + /* + * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux + * adds the pseudo header itself. + */ + t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG, + CSUM_HAS_PSEUDO_HDR, 0); + + return 0; +} + +/* + * Attempt to initialize the adapter via a Firmware Configuration File. 
+ */ +static int adap_init0_config(struct adapter *adapter, int reset) +{ + struct fw_caps_config_cmd caps_cmd; + const struct firmware *cf; + unsigned long mtype = 0, maddr = 0; + u32 finiver, finicsum, cfcsum; + int ret; + int config_issued = 0; + char *fw_config_file, fw_config_file_path[256]; + char *config_name = NULL; + + /* + * Reset device if necessary. + */ + if (reset) { + ret = t4_fw_reset(adapter, adapter->mbox, + PIORSTMODE | PIORST); + if (ret < 0) + goto bye; + } + + /* + * If we have a T4 configuration file under /lib/firmware/cxgb4/, + * then use that. Otherwise, use the configuration file stored + * in the adapter flash ... + */ + switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) { + case CHELSIO_T4: + fw_config_file = FW4_CFNAME; + break; + case CHELSIO_T5: + fw_config_file = FW5_CFNAME; + break; + default: + dev_err(adapter->pdev_dev, "Device %d is not supported\n", + adapter->pdev->device); + ret = -EINVAL; + goto bye; + } + + ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev); + if (ret < 0) { + config_name = "On FLASH"; + mtype = FW_MEMTYPE_CF_FLASH; + maddr = t4_flash_cfg_addr(adapter); + } else { + u32 params[7], val[7]; + + sprintf(fw_config_file_path, + "/lib/firmware/%s", fw_config_file); + config_name = fw_config_file_path; + + if (cf->size >= FLASH_CFG_MAX_SIZE) + ret = -ENOMEM; + else { + params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF)); + ret = t4_query_params(adapter, adapter->mbox, + adapter->fn, 0, 1, params, val); + if (ret == 0) { + /* + * For t4_memory_write() below addresses and + * sizes have to be in terms of multiples of 4 + * bytes. So, if the Configuration File isn't + * a multiple of 4 bytes in length we'll have + * to write that out separately since we can't + * guarantee that the bytes following the + * residual byte in the buffer returned by + * request_firmware() are zeroed out ... + */ + size_t resid = cf->size & 0x3; + size_t size = cf->size & ~0x3; + __be32 *data = (__be32 *)cf->data; + + mtype = FW_PARAMS_PARAM_Y_GET(val[0]); + maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16; + + ret = t4_memory_write(adapter, mtype, maddr, + size, data); + if (ret == 0 && resid != 0) { + union { + __be32 word; + char buf[4]; + } last; + int i; + + last.word = data[size >> 2]; + for (i = resid; i < 4; i++) + last.buf[i] = 0; + ret = t4_memory_write(adapter, mtype, + maddr + size, + 4, &last.word); + } + } + } + + release_firmware(cf); + if (ret) + goto bye; + } + + /* + * Issue a Capability Configuration command to the firmware to get it + * to parse the Configuration File. We don't use t4_fw_config_file() + * because we want the ability to modify various features after we've + * processed the configuration file ... + */ + memset(&caps_cmd, 0, sizeof(caps_cmd)); + caps_cmd.op_to_write = + htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST | + FW_CMD_READ); + caps_cmd.cfvalid_to_len16 = + htonl(FW_CAPS_CONFIG_CMD_CFVALID | + FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | + FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | + FW_LEN16(caps_cmd)); + ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), + &caps_cmd); + + /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware + * Configuration File in FLASH), our last gasp effort is to use the + * Firmware Configuration File which is embedded in the firmware. A + * very few early versions of the firmware didn't have one embedded + * but we can ignore those. 
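(The residual handling earlier in this hunk writes the final partial word of the config file zero-padded out to 4 bytes. The same idea can be modelled in isolation; a sketch, assuming nothing about t4_memory_write() itself:)

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Build the zero-padded final 4-byte word of a buffer whose size is
 * not a multiple of 4, as done before the last t4_memory_write() of
 * a Configuration File.
 */
static uint32_t last_word(const uint8_t *data, size_t size)
{
	size_t resid = size & 0x3;
	union { uint32_t word; uint8_t buf[4]; } last = { 0 };

	memcpy(last.buf, data + (size & ~(size_t)0x3), resid);
	return last.word;	/* bytes resid..3 are zero */
}

int main(void)
{
	const uint8_t cfg[] = { 1, 2, 3, 4, 5, 6 };	/* size 6, resid 2 */

	printf("%#x\n", last_word(cfg, sizeof(cfg)));
	return 0;
}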
+ */ + if (ret == -ENOENT) { + memset(&caps_cmd, 0, sizeof(caps_cmd)); + caps_cmd.op_to_write = + htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST | + FW_CMD_READ); + caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); + ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, + sizeof(caps_cmd), &caps_cmd); + config_name = "Firmware Default"; + } + + config_issued = 1; + if (ret < 0) + goto bye; + + finiver = ntohl(caps_cmd.finiver); + finicsum = ntohl(caps_cmd.finicsum); + cfcsum = ntohl(caps_cmd.cfcsum); + if (finicsum != cfcsum) + dev_warn(adapter->pdev_dev, "Configuration File checksum "\ "mismatch: [fini] csum=%#x, computed csum=%#x\n", + finicsum, cfcsum); + + /* + * And now tell the firmware to use the configuration we just loaded. + */ + caps_cmd.op_to_write = + htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST | + FW_CMD_WRITE); + caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); + ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), + NULL); + if (ret < 0) + goto bye; + + /* + * Tweak configuration based on system architecture, module + * parameters, etc. + */ + ret = adap_init0_tweaks(adapter); + if (ret < 0) + goto bye; + + /* + * And finally tell the firmware to initialize itself using the + * parameters from the Configuration File. + */ + ret = t4_fw_initialize(adapter, adapter->mbox); + if (ret < 0) + goto bye; + + /* + * Return successfully and note that we're operating with parameters + * not supplied by the driver, rather than from hard-wired + * initialization constants buried in the driver. + */ + adapter->flags |= USING_SOFT_PARAMS; + dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\ "Configuration File \"%s\", version %#x, computed checksum %#x\n", + config_name, finiver, cfcsum); + return 0; + + /* + * Something bad happened. Return the error ... (If the "error" + * is that there's no Configuration File on the adapter we don't + * want to issue a warning since this is fairly common.) + */ +bye: + if (config_issued && ret != -ENOENT) + dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n", + config_name, -ret); + return ret; +} + +/* + * Attempt to initialize the adapter via hard-coded, driver-supplied + * parameters ... + */ +static int adap_init0_no_config(struct adapter *adapter, int reset) +{ + struct sge *s = &adapter->sge; + struct fw_caps_config_cmd caps_cmd; + u32 v; + int i, ret; + + /* + * Reset device if necessary + */ + if (reset) { + ret = t4_fw_reset(adapter, adapter->mbox, + PIORSTMODE | PIORST); + if (ret < 0) + goto bye; + } + + /* + * Get device capabilities and select which we'll be using. + */ + memset(&caps_cmd, 0, sizeof(caps_cmd)); + caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST | FW_CMD_READ); + caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); + ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), + &caps_cmd); + if (ret < 0) + goto bye; + + if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) { + if (!vf_acls) + caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM); + else + caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM); + } else if (vf_acls) { + dev_err(adapter->pdev_dev, "virtualization ACLs not supported"); + goto bye; + } + caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST | FW_CMD_WRITE); + ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), + NULL); + if (ret < 0) + goto bye; + + /* + * Tweak configuration based on system architecture, module + * parameters, etc.
+ */ + ret = adap_init0_tweaks(adapter); + if (ret < 0) + goto bye; + + /* + * Select the RSS Global Mode we want to use. We use "Basic Virtual" + * mode which maps each Virtual Interface to its own section of + * the RSS Table and we turn on all map and hash enables ... + */ + adapter->flags |= RSS_TNLALLLOOKUP; + ret = t4_config_glbl_rss(adapter, adapter->mbox, + FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, + FW_RSS_GLB_CONFIG_CMD_TNLMAPEN | + FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ | + ((adapter->flags & RSS_TNLALLLOOKUP) ? + FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0)); + if (ret < 0) + goto bye; + + /* + * Set up our own fundamental resource provisioning ... + */ + ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0, + PFRES_NEQ, PFRES_NETHCTRL, + PFRES_NIQFLINT, PFRES_NIQ, + PFRES_TC, PFRES_NVI, + FW_PFVF_CMD_CMASK_MASK, + pfvfres_pmask(adapter, adapter->fn, 0), + PFRES_NEXACTF, + PFRES_R_CAPS, PFRES_WX_CAPS); + if (ret < 0) + goto bye; + + /* + * Perform low level SGE initialization. We need to do this before we + * send the firmware the INITIALIZE command because that will allow + * any other PF Drivers which are waiting for the Master + * Initialization to proceed. + */ + for (i = 0; i < SGE_NTIMERS - 1; i++) + s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL); + s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL; + s->counter_val[0] = 1; + for (i = 1; i < SGE_NCOUNTERS; i++) + s->counter_val[i] = min(intr_cnt[i - 1], + THRESHOLD_0_GET(THRESHOLD_0_MASK)); + t4_sge_init(adapter); + +#ifdef CONFIG_PCI_IOV + /* + * Provision resource limits for Virtual Functions. We currently + * grant them all the same static resource limits except for the Port + * Access Rights Mask which we're assigning based on the PF. All of + * the static provisioning stuff for both the PF and VF really needs + * to be managed in a persistent manner for each device which the + * firmware controls. + */ + { + int pf, vf; + + for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) { + if (num_vf[pf] <= 0) + continue; + + /* VF numbering starts at 1! */ + for (vf = 1; vf <= num_vf[pf]; vf++) { + ret = t4_cfg_pfvf(adapter, adapter->mbox, + pf, vf, + VFRES_NEQ, VFRES_NETHCTRL, + VFRES_NIQFLINT, VFRES_NIQ, + VFRES_TC, VFRES_NVI, + FW_PFVF_CMD_CMASK_MASK, + pfvfres_pmask( + adapter, pf, vf), + VFRES_NEXACTF, + VFRES_R_CAPS, VFRES_WX_CAPS); + if (ret < 0) + dev_warn(adapter->pdev_dev, + "failed to "\ "provision pf/vf=%d/%d; " + "err=%d\n", pf, vf, ret); + } + } + } +#endif + + /* + * Set up the default filter mode. Later we'll want to implement this + * via a firmware command, etc. ... This needs to be done before the + * firmware initialization command ... If the selected set of fields + * isn't equal to the default value, we'll need to make sure that the + * field selections will fit in the 36-bit budget.
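(The 36-bit check that follows simply sums per-field widths. The same accounting in standalone form, with the widths taken from the switch below; the array ordering here is an illustrative assumption, not the driver's mask layout:)

#include <stdio.h>

/* Compressed filter tuple widths, in bits. */
static const int field_bits[] = {
	1,	/* FCOE */
	3,	/* PORT */
	17,	/* VNIC_ID */
	17,	/* VLAN */
	8,	/* TOS */
	8,	/* PROTOCOL */
	16,	/* ETHERTYPE */
	9,	/* MACMATCH */
	3,	/* MPSHITTYPE */
	1,	/* FRAGMENTATION */
};

int main(void)
{
	/* e.g. PORT + VLAN + PROTOCOL: 3 + 17 + 8 = 28 bits <= 36, OK */
	int bits = field_bits[1] + field_bits[3] + field_bits[5];

	printf("%d bits of the 36-bit TP filter budget\n", bits);
	return 0;
}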
+ */ + if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) { + int j, bits = 0; + + for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++) + switch (tp_vlan_pri_map & (1 << j)) { + case 0: + /* compressed filter field not enabled */ + break; + case FCOE_MASK: + bits += 1; + break; + case PORT_MASK: + bits += 3; + break; + case VNIC_ID_MASK: + bits += 17; + break; + case VLAN_MASK: + bits += 17; + break; + case TOS_MASK: + bits += 8; + break; + case PROTOCOL_MASK: + bits += 8; + break; + case ETHERTYPE_MASK: + bits += 16; + break; + case MACMATCH_MASK: + bits += 9; + break; + case MPSHITTYPE_MASK: + bits += 3; + break; + case FRAGMENTATION_MASK: + bits += 1; + break; + } + + if (bits > 36) { + dev_err(adapter->pdev_dev, + "tp_vlan_pri_map=%#x needs %d bits > 36;"\ + " using %#x\n", tp_vlan_pri_map, bits, + TP_VLAN_PRI_MAP_DEFAULT); + tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT; + } + } + v = tp_vlan_pri_map; + t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA, + &v, 1, TP_VLAN_PRI_MAP); + + /* + * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG order + * to support any of the compressed filter fields above. Newer + * versions of the firmware do this automatically but it doesn't hurt + * to set it here. Meanwhile, we do _not_ need to set Lookup Every + * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets + * since the firmware automatically turns this on and off when we have + * a non-zero number of filters active (since it does have a + * performance impact). + */ + if (tp_vlan_pri_map) + t4_set_reg_field(adapter, TP_GLOBAL_CONFIG, + FIVETUPLELOOKUP_MASK, + FIVETUPLELOOKUP_MASK); + + /* + * Tweak some settings. + */ + t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) | + RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) | + PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) | + KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9)); + + /* + * Get basic stuff going by issuing the Firmware Initialize command. + * Note that this _must_ be after all PFVF commands ... + */ + ret = t4_fw_initialize(adapter, adapter->mbox); + if (ret < 0) + goto bye; + + /* + * Return successfully! + */ + dev_info(adapter->pdev_dev, "Successfully configured using built-in "\ + "driver parameters\n"); + return 0; + + /* + * Something bad happened. Return the error ... + */ +bye: + return ret; +} + +static struct fw_info fw_info_array[] = { + { + .chip = CHELSIO_T4, + .fs_name = FW4_CFNAME, + .fw_mod_name = FW4_FNAME, + .fw_hdr = { + .chip = FW_HDR_CHIP_T4, + .fw_ver = __cpu_to_be32(FW_VERSION(T4)), + .intfver_nic = FW_INTFVER(T4, NIC), + .intfver_vnic = FW_INTFVER(T4, VNIC), + .intfver_ri = FW_INTFVER(T4, RI), + .intfver_iscsi = FW_INTFVER(T4, ISCSI), + .intfver_fcoe = FW_INTFVER(T4, FCOE), + }, + }, { + .chip = CHELSIO_T5, + .fs_name = FW5_CFNAME, + .fw_mod_name = FW5_FNAME, + .fw_hdr = { + .chip = FW_HDR_CHIP_T5, + .fw_ver = __cpu_to_be32(FW_VERSION(T5)), + .intfver_nic = FW_INTFVER(T5, NIC), + .intfver_vnic = FW_INTFVER(T5, VNIC), + .intfver_ri = FW_INTFVER(T5, RI), + .intfver_iscsi = FW_INTFVER(T5, ISCSI), + .intfver_fcoe = FW_INTFVER(T5, FCOE), + }, + } +}; + +static struct fw_info *find_fw_info(int chip) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) { + if (fw_info_array[i].chip == chip) + return &fw_info_array[i]; + } + return NULL; +} + +/* + * Phase 0 of initialization: contact FW, obtain config, perform basic init. 
*/ static int adap_init0(struct adapter *adap) { @@ -2961,72 +5296,276 @@ static int adap_init0(struct adapter *adap) u32 v, port_vec; enum dev_state state; u32 params[7], val[7]; - struct fw_caps_config_cmd c; - - ret = t4_check_fw_version(adap); - if (ret == -EINVAL || ret > 0) { - if (upgrade_fw(adap) >= 0) /* recache FW version */ - ret = t4_check_fw_version(adap); - } - if (ret < 0) - return ret; + struct fw_caps_config_cmd caps_cmd; + int reset = 1; - /* contact FW, request master */ - ret = t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, &state); + /* + * Contact FW, advertising Master capability (and potentially forcing + * ourselves as the Master PF if our module parameter force_init is + * set). + */ + ret = t4_fw_hello(adap, adap->mbox, adap->fn, + force_init ? MASTER_MUST : MASTER_MAY, + &state); if (ret < 0) { dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", ret); return ret; } + if (ret == adap->mbox) + adap->flags |= MASTER_PF; + if (force_init && state == DEV_STATE_INIT) + state = DEV_STATE_UNINIT; - /* reset device */ - ret = t4_fw_reset(adap, adap->fn, PIORSTMODE | PIORST); - if (ret < 0) - goto bye; + /* + * If we're the Master PF Driver and the device is uninitialized, + * then let's consider upgrading the firmware ... (We always want + * to check the firmware version number in order to A. get it for + * later reporting and B. to warn if the currently loaded firmware + * is excessively mismatched relative to the driver.) + */ + t4_get_fw_version(adap, &adap->params.fw_vers); + t4_get_tp_version(adap, &adap->params.tp_vers); + if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) { + struct fw_info *fw_info; + struct fw_hdr *card_fw; + const struct firmware *fw; + const u8 *fw_data = NULL; + unsigned int fw_size = 0; + + /* This is the firmware whose headers the driver was compiled + * against + */ + fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip)); + if (fw_info == NULL) { + dev_err(adap->pdev_dev, + "unable to get firmware info for chip %d.\n", + CHELSIO_CHIP_VERSION(adap->params.chip)); + return -EINVAL; + } - for (v = 0; v < SGE_NTIMERS - 1; v++) - adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL); - adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL; - adap->sge.counter_val[0] = 1; - for (v = 1; v < SGE_NCOUNTERS; v++) - adap->sge.counter_val[v] = min(intr_cnt[v - 1], - THRESHOLD_3_MASK); -#define FW_PARAM_DEV(param) \ - (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) + /* allocate memory to read the header of the firmware on the + * card + */ + card_fw = t4_alloc_mem(sizeof(*card_fw)); + + /* Get FW from /lib/firmware/ */ + ret = request_firmware(&fw, fw_info->fw_mod_name, + adap->pdev_dev); + if (ret < 0) { + dev_err(adap->pdev_dev, + "unable to load firmware image %s, error %d\n", + fw_info->fw_mod_name, ret); + } else { + fw_data = fw->data; + fw_size = fw->size; + } + + /* upgrade FW logic */ + ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw, + state, &reset); - params[0] = FW_PARAM_DEV(CCLK); - ret = t4_query_params(adap, adap->fn, adap->fn, 0, 1, params, val); + /* Cleaning up */ + if (fw != NULL) + release_firmware(fw); + t4_free_mem(card_fw); + + if (ret < 0) + goto bye; + } + + /* + * Grab VPD parameters. This should be done after we establish a + * connection to the firmware since some of the VPD parameters + * (notably the Core Clock frequency) are retrieved via requests to + * the firmware.
On the other hand, we need these fairly early on + * so we do this right after getting ahold of the firmware. + */ + ret = get_vpd_params(adap, &adap->params.vpd); if (ret < 0) goto bye; - adap->params.vpd.cclk = val[0]; - ret = adap_init1(adap, &c); + /* + * Find out what ports are available to us. Note that we need to do + * this before calling adap_init0_no_config() since it needs nports + * and portvec ... + */ + v = + FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec); if (ret < 0) goto bye; + adap->params.nports = hweight32(port_vec); + adap->params.portvec = port_vec; + + /* + * If the firmware is initialized already (and we're not forcing a + * master initialization), note that we're living with existing + * adapter parameters. Otherwise, it's time to try initializing the + * adapter ... + */ + if (state == DEV_STATE_INIT) { + dev_info(adap->pdev_dev, "Coming up as %s: "\ + "Adapter already initialized\n", + adap->flags & MASTER_PF ? "MASTER" : "SLAVE"); + adap->flags |= USING_SOFT_PARAMS; + } else { + dev_info(adap->pdev_dev, "Coming up as MASTER: "\ + "Initializing adapter\n"); + + /* + * If the firmware doesn't support Configuration + * Files warn user and exit, + */ + if (ret < 0) + dev_warn(adap->pdev_dev, "Firmware doesn't support " + "configuration file.\n"); + if (force_old_init) + ret = adap_init0_no_config(adap, reset); + else { + /* + * Find out whether we're dealing with a version of + * the firmware which has configuration file support. + */ + params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF)); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, + params, val); + + /* + * If the firmware doesn't support Configuration + * Files, use the old Driver-based, hard-wired + * initialization. Otherwise, try using the + * Configuration File support and fall back to the + * Driver-based initialization if there's no + * Configuration File found. + */ + if (ret < 0) + ret = adap_init0_no_config(adap, reset); + else { + /* + * The firmware provides us with a memory + * buffer where we can load a Configuration + * File from the host if we want to override + * the Configuration File in flash. + */ + + ret = adap_init0_config(adap, reset); + if (ret == -ENOENT) { + dev_info(adap->pdev_dev, + "No Configuration File present " + "on adapter. Using hard-wired " + "configuration parameters.\n"); + ret = adap_init0_no_config(adap, reset); + } + } + } + if (ret < 0) { + dev_err(adap->pdev_dev, + "could not initialize adapter, error %d\n", + -ret); + goto bye; + } + } + + /* + * If we're living with non-hard-coded parameters (either from a + * Firmware Configuration File or values programmed by a different PF + * Driver), give the SGE code a chance to pull in anything that it + * needs ... Note that this must be called after we retrieve our VPD + * parameters in order to know how to convert core ticks to seconds. + */ + if (adap->flags & USING_SOFT_PARAMS) { + ret = t4_sge_init(adap); + if (ret < 0) + goto bye; + } + + if (is_bypass_device(adap->pdev->device)) + adap->params.bypass = 1; + + /* + * Grab some of our basic fundamental operating parameters. 
+ */ +#define FW_PARAM_DEV(param) \ + (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ + FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) + #define FW_PARAM_PFVF(param) \ - (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \ - FW_PARAMS_PARAM_Y(adap->fn)) + FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ + FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \ + FW_PARAMS_PARAM_Y(0) | \ + FW_PARAMS_PARAM_Z(0) - params[0] = FW_PARAM_DEV(PORTVEC); + params[0] = FW_PARAM_PFVF(EQ_START); params[1] = FW_PARAM_PFVF(L2T_START); params[2] = FW_PARAM_PFVF(L2T_END); params[3] = FW_PARAM_PFVF(FILTER_START); params[4] = FW_PARAM_PFVF(FILTER_END); params[5] = FW_PARAM_PFVF(IQFLINT_START); - params[6] = FW_PARAM_PFVF(EQ_START); - ret = t4_query_params(adap, adap->fn, adap->fn, 0, 7, params, val); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val); if (ret < 0) goto bye; - port_vec = val[0]; + adap->sge.egr_start = val[0]; + adap->l2t_start = val[1]; + adap->l2t_end = val[2]; adap->tids.ftid_base = val[3]; adap->tids.nftids = val[4] - val[3] + 1; adap->sge.ingr_start = val[5]; - adap->sge.egr_start = val[6]; - if (c.ofldcaps) { + /* query params related to active filter region */ + params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START); + params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val); + /* If Active filter size is set we enable establishing + * offload connection through firmware work request + */ + if ((val[0] != val[1]) && (ret >= 0)) { + adap->flags |= FW_OFLD_CONN; + adap->tids.aftid_base = val[0]; + adap->tids.aftid_end = val[1]; + } + + /* If we're running on newer firmware, let it know that we're + * prepared to deal with encapsulated CPL messages. Older + * firmware won't understand this and we'll just get + * unencapsulated messages ... + */ + params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP); + val[0] = 1; + (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val); + + /* + * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL + * capability. Earlier versions of the firmware didn't have the + * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no + * permission to use ULPTX MEMWRITE DSGL. + */ + if (is_t4(adap->params.chip)) { + adap->params.ulptx_memwrite_dsgl = false; + } else { + params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, + 1, params, val); + adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0); + } + + /* + * Get device capabilities so we can determine what resources we need + * to manage. 
+ */ + memset(&caps_cmd, 0, sizeof(caps_cmd)); + caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST | FW_CMD_READ); + caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); + ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd), + &caps_cmd); + if (ret < 0) + goto bye; + + if (caps_cmd.ofldcaps) { /* query offload-related parameters */ params[0] = FW_PARAM_DEV(NTID); params[1] = FW_PARAM_PFVF(SERVER_START); @@ -3034,28 +5573,46 @@ static int adap_init0(struct adapter *adap) params[3] = FW_PARAM_PFVF(TDDP_START); params[4] = FW_PARAM_PFVF(TDDP_END); params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); - ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params, - val); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, + params, val); if (ret < 0) goto bye; adap->tids.ntids = val[0]; adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS); adap->tids.stid_base = val[1]; adap->tids.nstids = val[2] - val[1] + 1; + /* + * Set up the server filter region. Divide the available filter + * region into two parts: regular filters get 1/3rd and server + * filters get 2/3rds. This is only enabled if the workaround + * path is enabled. + * 1. Regular filters. + * 2. Server filters: these are special filters used to + * redirect SYN packets to the offload queue. + */ + if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) { + adap->tids.sftid_base = adap->tids.ftid_base + + DIV_ROUND_UP(adap->tids.nftids, 3); + adap->tids.nsftids = adap->tids.nftids - + DIV_ROUND_UP(adap->tids.nftids, 3); + adap->tids.nftids = adap->tids.sftid_base - + adap->tids.ftid_base; + } adap->vres.ddp.start = val[3]; adap->vres.ddp.size = val[4] - val[3] + 1; adap->params.ofldq_wr_cred = val[5]; + adap->params.offload = 1; } - if (c.rdmacaps) { + if (caps_cmd.rdmacaps) { params[0] = FW_PARAM_PFVF(STAG_START); params[1] = FW_PARAM_PFVF(STAG_END); params[2] = FW_PARAM_PFVF(RQ_START); params[3] = FW_PARAM_PFVF(RQ_END); params[4] = FW_PARAM_PFVF(PBL_START); params[5] = FW_PARAM_PFVF(PBL_END); - ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params, - val); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, + params, val); if (ret < 0) goto bye; adap->vres.stag.start = val[0]; @@ -3071,8 +5628,7 @@ static int adap_init0(struct adapter *adap) params[3] = FW_PARAM_PFVF(CQ_END); params[4] = FW_PARAM_PFVF(OCQ_START); params[5] = FW_PARAM_PFVF(OCQ_END); - ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params, - val); + ret = t4_query_params(adap, 0, 0, 0, 6, params, val); if (ret < 0) goto bye; adap->vres.qp.start = val[0]; @@ -3082,11 +5638,11 @@ static int adap_init0(struct adapter *adap) adap->vres.ocq.start = val[4]; adap->vres.ocq.size = val[5] - val[4] + 1; } - if (c.iscsicaps) { + if (caps_cmd.iscsicaps) { params[0] = FW_PARAM_PFVF(ISCSI_START); params[1] = FW_PARAM_PFVF(ISCSI_END); - ret = t4_query_params(adap, adap->fn, adap->fn, 0, 2, params, - val); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, + params, val); if (ret < 0) goto bye; adap->vres.iscsi.start = val[0]; @@ -3095,62 +5651,53 @@ static int adap_init0(struct adapter *adap) #undef FW_PARAM_PFVF #undef FW_PARAM_DEV - adap->params.nports = hweight32(port_vec); - adap->params.portvec = port_vec; - adap->flags |= FW_OK; - - /* These are finalized by FW initialization, load their values now */ - v = t4_read_reg(adap, TP_TIMER_RESOLUTION); - adap->params.tp.tre = TIMERRESOLUTION_GET(v); - t4_read_mtu_tbl(adap, adap->params.mtus, NULL); - t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, -
adap->params.b_wnd); - -#ifdef CONFIG_PCI_IOV - /* - * Provision resource limits for Virtual Functions. We currently - * grant them all the same static resource limits except for the Port - * Access Rights Mask which we're assigning based on the PF. All of - * the static provisioning stuff for both the PF and VF really needs - * to be managed in a persistent manner for each device which the - * firmware controls. + /* The MTU/MSS Table is initialized by now, so load its values. If + * we're initializing the adapter, then we'll make any modifications + * we want to the MTU/MSS Table and also initialize the congestion + * parameters. */ - { - int pf, vf; - - for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) { - if (num_vf[pf] <= 0) - continue; + t4_read_mtu_tbl(adap, adap->params.mtus, NULL); + if (state != DEV_STATE_INIT) { + int i; - /* VF numbering starts at 1! */ - for (vf = 1; vf <= num_vf[pf]; vf++) { - ret = t4_cfg_pfvf(adap, adap->fn, pf, vf, - VFRES_NEQ, VFRES_NETHCTRL, - VFRES_NIQFLINT, VFRES_NIQ, - VFRES_TC, VFRES_NVI, - FW_PFVF_CMD_CMASK_MASK, - pfvfres_pmask(adap, pf, vf), - VFRES_NEXACTF, - VFRES_R_CAPS, VFRES_WX_CAPS); - if (ret < 0) - dev_warn(adap->pdev_dev, "failed to " - "provision pf/vf=%d/%d; " - "err=%d\n", pf, vf, ret); + /* The default MTU Table contains values 1492 and 1500. + * However, for TCP, it's better to have two values which are + * a multiple of 8 +/- 4 bytes apart near this popular MTU. + * This allows us to have a TCP Data Payload which is a + * multiple of 8 regardless of what combination of TCP Options + * are in use (always a multiple of 4 bytes) which is + * important for performance reasons. For instance, if no + * options are in use, then we have a 20-byte IP header and a + * 20-byte TCP header. In this case, a 1500-byte MTU would + * result in a TCP Data Payload of 1500 - 40 == 1460 bytes + * which is not a multiple of 8. So using an MTU of 1488 in + * this case results in a TCP Data Payload of 1448 bytes which + * is a multiple of 8. On the other hand, if 12-byte TCP Time + * Stamps have been negotiated, then an MTU of 1500 bytes + * results in a TCP Data Payload of 1448 bytes which, as + * above, is a multiple of 8 bytes ... + */ + for (i = 0; i < NMTUS; i++) + if (adap->params.mtus[i] == 1492) { + adap->params.mtus[i] = 1488; + break; } - } - } -#endif - setup_memwin(adap); + t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, + adap->params.b_wnd); + } + t4_init_tp_params(adap); + adap->flags |= FW_OK; return 0; /* - * If a command timed out or failed with EIO FW does not operate within - * its spec or something catastrophic happened to HW/FW, stop issuing - * commands. + * Something bad happened. If a command timed out or failed with EIO, + * the FW is not operating within its spec or something catastrophic + * happened to the HW/FW; stop issuing commands.
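(The 1492 -> 1488 substitution above rests on simple arithmetic; a quick standalone check of the payload sizes the comment works through:)

#include <stdio.h>

int main(void)
{
	int hdrs = 20 + 20;	/* IPv4 + TCP, no options */
	int tstamp = 12;	/* TCP timestamp option */

	printf("1500: %d, %d\n", 1500 - hdrs, 1500 - hdrs - tstamp);
	/* 1460 (not a multiple of 8), 1448 (multiple of 8) */
	printf("1488: %d, %d\n", 1488 - hdrs, 1488 - hdrs - tstamp);
	/* 1448 (multiple of 8), 1436 (not a multiple of 8) */
	return 0;
}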
*/ -bye: if (ret != -ETIMEDOUT && ret != -EIO) - t4_fw_bye(adap, adap->fn); +bye: + if (ret != -ETIMEDOUT && ret != -EIO) + t4_fw_bye(adap, adap->mbox); return ret; } @@ -3168,16 +5715,21 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev, rtnl_lock(); adap->flags &= ~FW_OK; notify_ulds(adap, CXGB4_STATE_START_RECOVERY); + spin_lock(&adap->stats_lock); for_each_port(adap, i) { struct net_device *dev = adap->port[i]; netif_device_detach(dev); netif_carrier_off(dev); } + spin_unlock(&adap->stats_lock); if (adap->flags & FULL_INIT_DONE) cxgb_down(adap); rtnl_unlock(); - pci_disable_device(pdev); + if ((adap->flags & DEV_ENABLED)) { + pci_disable_device(pdev); + adap->flags &= ~DEV_ENABLED; + } out: return state == pci_channel_io_perm_failure ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; } @@ -3194,9 +5746,13 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev) return PCI_ERS_RESULT_RECOVERED; } - if (pci_enable_device(pdev)) { - dev_err(&pdev->dev, "cannot reenable PCI device after reset\n"); - return PCI_ERS_RESULT_DISCONNECT; + if (!(adap->flags & DEV_ENABLED)) { + if (pci_enable_device(pdev)) { + dev_err(&pdev->dev, "Cannot reenable PCI " + "device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + adap->flags |= DEV_ENABLED; } pci_set_master(pdev); @@ -3206,7 +5762,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev) if (t4_wait_dev_ready(adap) < 0) return PCI_ERS_RESULT_DISCONNECT; - if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL)) + if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0) return PCI_ERS_RESULT_DISCONNECT; adap->flags |= FW_OK; if (adap_init1(adap, &c)) @@ -3252,23 +5808,24 @@ static void eeh_resume(struct pci_dev *pdev) rtnl_unlock(); } -static struct pci_error_handlers cxgb4_eeh = { +static const struct pci_error_handlers cxgb4_eeh = { .error_detected = eeh_err_detected, .slot_reset = eeh_slot_reset, .resume = eeh_resume, }; -static inline bool is_10g_port(const struct link_config *lc) +static inline bool is_x_10g_port(const struct link_config *lc) { - return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0; + return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 || + (lc->supported & FW_PORT_CAP_SPEED_40G) != 0; } -static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx, +static inline void init_rspq(struct adapter *adap, struct sge_rspq *q, + unsigned int us, unsigned int cnt, unsigned int size, unsigned int iqe_size) { - q->intr_params = QINTR_TIMER_IDX(timer_idx) | - (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0); - q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0; + q->adap = adap; + set_rspq_intr_params(q, us, cnt); q->iqe_len = iqe_size; q->size = size; } @@ -3278,13 +5835,14 @@ static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx, * of ports we found and the number of available CPUs. Most settings can be * modified by the admin prior to actual use. 
*/ -static void __devinit cfg_queues(struct adapter *adap) +static void cfg_queues(struct adapter *adap) { struct sge *s = &adap->sge; int i, q10g = 0, n10g = 0, qidx = 0; + int ciq_size; for_each_port(adap, i) - n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg); + n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); /* * We default to 1 queue per non-10G port and up to # of cores queues @@ -3292,14 +5850,14 @@ static void __devinit cfg_queues(struct adapter *adap) */ if (n10g) q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g; - if (q10g > num_online_cpus()) - q10g = num_online_cpus(); + if (q10g > netif_get_num_default_rss_queues()) + q10g = netif_get_num_default_rss_queues(); for_each_port(adap, i) { struct port_info *pi = adap2pinfo(adap, i); pi->first_qset = qidx; - pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1; + pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1; qidx += pi->nqsets; } @@ -3320,12 +5878,13 @@ static void __devinit cfg_queues(struct adapter *adap) s->ofldqsets = adap->params.nports; /* For RDMA one Rx queue per channel suffices */ s->rdmaqs = adap->params.nports; + s->rdmaciqs = adap->params.nports; } for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { struct sge_eth_rxq *r = &s->ethrxq[i]; - init_rspq(&r->rspq, 0, 0, 1024, 64); + init_rspq(adap, &r->rspq, 5, 10, 1024, 64); r->fl.size = 72; } @@ -3341,7 +5900,7 @@ static void __devinit cfg_queues(struct adapter *adap) for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) { struct sge_ofld_rxq *r = &s->ofldrxq[i]; - init_rspq(&r->rspq, 0, 0, 1024, 64); + init_rspq(adap, &r->rspq, 5, 1, 1024, 64); r->rspq.uld = CXGB4_ULD_ISCSI; r->fl.size = 72; } @@ -3349,20 +5908,33 @@ static void __devinit cfg_queues(struct adapter *adap) for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) { struct sge_ofld_rxq *r = &s->rdmarxq[i]; - init_rspq(&r->rspq, 0, 0, 511, 64); + init_rspq(adap, &r->rspq, 5, 1, 511, 64); r->rspq.uld = CXGB4_ULD_RDMA; r->fl.size = 72; } - init_rspq(&s->fw_evtq, 6, 0, 512, 64); - init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64); + ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids; + if (ciq_size > SGE_MAX_IQ_SIZE) { + CH_WARN(adap, "CIQ size too small for available IQs\n"); + ciq_size = SGE_MAX_IQ_SIZE; + } + + for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) { + struct sge_ofld_rxq *r = &s->rdmaciq[i]; + + init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64); + r->rspq.uld = CXGB4_ULD_RDMA; + } + + init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64); + init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64); } /* * Reduce the number of Ethernet queues across all ports to at most n. * n provides at least one queue per port. 
 /*
  * Reduce the number of Ethernet queues across all ports to at most n.
  * n provides at least one queue per port.
  */
-static void __devinit reduce_ethqs(struct adapter *adap, int n)
+static void reduce_ethqs(struct adapter *adap, int n)
 {
 	int i;
 	struct port_info *pi;
@@ -3389,10 +5961,10 @@
 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
 #define EXTRA_VECS 2
 
-static int __devinit enable_msix(struct adapter *adap)
+static int enable_msix(struct adapter *adap)
 {
 	int ofld_need = 0;
-	int i, err, want, need;
+	int i, want, need;
 	struct sge *s = &adap->sge;
 	unsigned int nchan = adap->params.nports;
 	struct msix_entry entries[MAX_INGQ + 1];
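
The hunk below replaces the old pci_enable_msix() retry loop with pci_enable_msix_range(), which performs the same negotiation in one call: it returns a vector count somewhere in [minvec, maxvec] on success, or a negative errno if even minvec vectors cannot be allocated. A hedged sketch of a caller relying on that contract; grab_vectors() is a made-up name, and pdev/entries are assumed to be set up as in enable_msix():

	#include <linux/pci.h>

	static int grab_vectors(struct pci_dev *pdev, struct msix_entry *entries,
				int need, int want)
	{
		int nvec;

		/* One call negotiates the count: the result is in
		 * [need, want] or a negative errno; no retry loop needed.
		 */
		nvec = pci_enable_msix_range(pdev, entries, need, want);
		if (nvec < 0)
			return nvec;	/* not even "need" vectors available */

		return nvec;		/* caller distributes nvec vectors */
	}
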
@@ -3402,43 +5974,41 @@
 	want = s->max_ethqsets + EXTRA_VECS;
 	if (is_offload(adap)) {
-		want += s->rdmaqs + s->ofldqsets;
+		want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
 		/* need nchan for each possible ULD */
-		ofld_need = 2 * nchan;
+		ofld_need = 3 * nchan;
 	}
 	need = adap->params.nports + EXTRA_VECS + ofld_need;
-	while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
-		want = err;
+	want = pci_enable_msix_range(adap->pdev, entries, need, want);
+	if (want < 0)
+		return want;
 
-	if (!err) {
-		/*
-		 * Distribute available vectors to the various queue groups.
-		 * Every group gets its minimum requirement and NIC gets top
-		 * priority for leftovers.
-		 */
-		i = want - EXTRA_VECS - ofld_need;
-		if (i < s->max_ethqsets) {
-			s->max_ethqsets = i;
-			if (i < s->ethqsets)
-				reduce_ethqs(adap, i);
-		}
-		if (is_offload(adap)) {
-			i = want - EXTRA_VECS - s->max_ethqsets;
-			i -= ofld_need - nchan;
-			s->ofldqsets = (i / nchan) * nchan;	/* round down */
-		}
-		for (i = 0; i < want; ++i)
-			adap->msix_info[i].vec = entries[i].vector;
-	} else if (err > 0)
-		dev_info(adap->pdev_dev,
-			 "only %d MSI-X vectors left, not using MSI-X\n", err);
-	return err;
+	/*
+	 * Distribute available vectors to the various queue groups.
+	 * Every group gets its minimum requirement and NIC gets top
+	 * priority for leftovers.
+	 */
+	i = want - EXTRA_VECS - ofld_need;
+	if (i < s->max_ethqsets) {
+		s->max_ethqsets = i;
+		if (i < s->ethqsets)
+			reduce_ethqs(adap, i);
+	}
+	if (is_offload(adap)) {
+		i = want - EXTRA_VECS - s->max_ethqsets;
+		i -= ofld_need - nchan;
+		s->ofldqsets = (i / nchan) * nchan;	/* round down */
+	}
+	for (i = 0; i < want; ++i)
+		adap->msix_info[i].vec = entries[i].vector;
+
+	return 0;
 }
 
 #undef EXTRA_VECS
 
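
A worked example of the leftover-vector distribution added above, with assumed numbers (20 vectors granted, two channels, offload enabled), written as a stand-alone program so the round-down arithmetic can be checked:

	#include <stdio.h>

	#define EXTRA_VECS 2

	int main(void)
	{
		int want = 20, nchan = 2;	/* granted vectors, # channels */
		int ofld_need = 3 * nchan;	/* minimum reserved for ULDs */
		int max_ethqsets = 16, ofldqsets, i;

		i = want - EXTRA_VECS - ofld_need;	/* 12 left for NIC */
		if (i < max_ethqsets)
			max_ethqsets = i;		/* trimmed to 12 */

		i = want - EXTRA_VECS - max_ethqsets;	/* 6 left for offload */
		i -= ofld_need - nchan;			/* minus non-qset ULD needs: 2 */
		ofldqsets = (i / nchan) * nchan;	/* round down to 2 */

		printf("eth qsets=%d, ofld qsets=%d\n", max_ethqsets, ofldqsets);
		return 0;
	}
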
" MSI" : ""); - netdev_info(dev, "S/N: %s, E/C: %s\n", - adap->params.vpd.sn, adap->params.vpd.ec); + netdev_info(dev, "S/N: %s, P/N: %s\n", + adap->params.vpd.sn, adap->params.vpd.pn); } -static void __devinit enable_pcie_relaxed_ordering(struct pci_dev *dev) +static void enable_pcie_relaxed_ordering(struct pci_dev *dev) { - u16 v; - int pos; - - pos = pci_pcie_cap(dev); - if (pos > 0) { - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &v); - v |= PCI_EXP_DEVCTL_RELAX_EN; - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, v); - } + pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN); } /* @@ -3531,11 +6093,11 @@ static void free_some_resources(struct adapter *adapter) #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \ NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) +#define SEGMENT_SIZE 128 -static int __devinit init_one(struct pci_dev *pdev, - const struct pci_device_id *ent) +static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - int func, i, err; + int func, i, err, s_qpp, qpp, num_seg; struct port_info *pi; bool highdma = false; struct adapter *adapter = NULL; @@ -3589,6 +6151,9 @@ static int __devinit init_one(struct pci_dev *pdev, goto out_disable_device; } + /* PCI device has been enabled */ + adapter->flags |= DEV_ENABLED; + adapter->regs = pci_ioremap_bar(pdev, 0); if (!adapter->regs) { dev_err(&pdev->dev, "cannot map device registers\n"); @@ -3598,6 +6163,7 @@ static int __devinit init_one(struct pci_dev *pdev, adapter->pdev = pdev; adapter->pdev_dev = &pdev->dev; + adapter->mbox = func; adapter->fn = func; adapter->msg_enable = dflt_msg_enable; memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); @@ -3606,11 +6172,42 @@ static int __devinit init_one(struct pci_dev *pdev, spin_lock_init(&adapter->tid_release_lock); INIT_WORK(&adapter->tid_release_task, process_tid_release_list); + INIT_WORK(&adapter->db_full_task, process_db_full); + INIT_WORK(&adapter->db_drop_task, process_db_drop); err = t4_prep_adapter(adapter); if (err) - goto out_unmap_bar; + goto out_unmap_bar0; + + if (!is_t4(adapter->params.chip)) { + s_qpp = QUEUESPERPAGEPF1 * adapter->fn; + qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter, + SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp); + num_seg = PAGE_SIZE / SEGMENT_SIZE; + + /* Each segment size is 128B. Write coalescing is enabled only + * when SGE_EGRESS_QUEUES_PER_PAGE_PF reg value for the + * queue is less no of segments that can be accommodated in + * a page size. 
@@ -3606,11 +6172,42 @@
 	spin_lock_init(&adapter->tid_release_lock);
 	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
+	INIT_WORK(&adapter->db_full_task, process_db_full);
+	INIT_WORK(&adapter->db_drop_task, process_db_drop);
 
 	err = t4_prep_adapter(adapter);
 	if (err)
-		goto out_unmap_bar;
+		goto out_unmap_bar0;
+
+	if (!is_t4(adapter->params.chip)) {
+		s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
+		qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
+		      SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
+		num_seg = PAGE_SIZE / SEGMENT_SIZE;
+
+		/* Each segment is 128 B long.  Write coalescing is enabled
+		 * only when the SGE_EGRESS_QUEUES_PER_PAGE_PF value for the
+		 * queue is less than the number of segments that can be
+		 * accommodated in a page.
+		 */
+		if (qpp > num_seg) {
+			dev_err(&pdev->dev,
+				"Incorrect number of egress queues per page\n");
+			err = -EINVAL;
+			goto out_unmap_bar0;
+		}
+		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
+					   pci_resource_len(pdev, 2));
+		if (!adapter->bar2) {
+			dev_err(&pdev->dev, "cannot map device bar2 region\n");
+			err = -ENOMEM;
+			goto out_unmap_bar0;
+		}
+	}
+
+	setup_memwin(adapter);
 	err = adap_init0(adapter);
+	setup_memwin_rdma(adapter);
 	if (err)
 		goto out_unmap_bar;
@@ -3636,7 +6233,7 @@
 		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
 			NETIF_F_RXHASH |
-			NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
 		if (highdma)
 			netdev->hw_features |= NETIF_F_HIGHDMA;
 		netdev->features |= netdev->hw_features;
@@ -3645,7 +6242,7 @@
 		netdev->priv_flags |= IFF_UNICAST_FLT;
 
 		netdev->netdev_ops = &cxgb4_netdev_ops;
-		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
+		netdev->ethtool_ops = &cxgb_ethtool_ops;
 	}
 
 	pci_set_drvdata(pdev, adapter);
@@ -3736,6 +6333,9 @@ sriov:
  out_free_dev:
 	free_some_resources(adapter);
  out_unmap_bar:
+	if (!is_t4(adapter->params.chip))
+		iounmap(adapter->bar2);
+ out_unmap_bar0:
 	iounmap(adapter->regs);
  out_free_adapter:
 	kfree(adapter);
@@ -3744,16 +6344,18 @@ sriov:
 	pci_disable_device(pdev);
  out_release_regions:
 	pci_release_regions(pdev);
-	pci_set_drvdata(pdev, NULL);
 	return err;
 }
 
-static void __devexit remove_one(struct pci_dev *pdev)
+static void remove_one(struct pci_dev *pdev)
 {
 	struct adapter *adapter = pci_get_drvdata(pdev);
 
+#ifdef CONFIG_PCI_IOV
 	pci_disable_sriov(pdev);
+#endif
+
 	if (adapter) {
 		int i;
 
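
The new out_unmap_bar0 label above extends init_one()'s goto unwind ladder: each failure site jumps to the label that releases everything acquired so far, in reverse order of acquisition. A generic sketch of the pattern; acquire_*/release_* are placeholders, not driver functions:

	int acquire_a(void), acquire_b(void), acquire_c(void);
	void release_a(void), release_b(void);

	static int probe_sketch(void)
	{
		int err;

		err = acquire_a();
		if (err)
			goto out;
		err = acquire_b();
		if (err)
			goto out_release_a;
		err = acquire_c();
		if (err)
			goto out_release_b;
		return 0;		/* success: keep everything */

	out_release_b:			/* unwind in reverse order */
		release_b();
	out_release_a:
		release_a();
	out:
		return err;
	}
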
@@ -3767,16 +6369,32 @@ static void __devexit remove_one(struct pci_dev *pdev)
 		if (adapter->debugfs_root)
 			debugfs_remove_recursive(adapter->debugfs_root);
 
+		/* If we allocated filters, free up state associated with any
+		 * valid filters ...
+		 */
+		if (adapter->tids.ftid_tab) {
+			struct filter_entry *f = &adapter->tids.ftid_tab[0];
+			for (i = 0; i < (adapter->tids.nftids +
+					adapter->tids.nsftids); i++, f++)
+				if (f->valid)
+					clear_filter(adapter, f);
+		}
+
 		if (adapter->flags & FULL_INIT_DONE)
 			cxgb_down(adapter);
 		free_some_resources(adapter);
 		iounmap(adapter->regs);
-		kfree(adapter);
+		if (!is_t4(adapter->params.chip))
+			iounmap(adapter->bar2);
 		pci_disable_pcie_error_reporting(pdev);
-		pci_disable_device(pdev);
+		if ((adapter->flags & DEV_ENABLED)) {
+			pci_disable_device(pdev);
+			adapter->flags &= ~DEV_ENABLED;
+		}
 		pci_release_regions(pdev);
-		pci_set_drvdata(pdev, NULL);
+		synchronize_rcu();
+		kfree(adapter);
 	} else
 		pci_release_regions(pdev);
 }
@@ -3785,7 +6403,8 @@ static struct pci_driver cxgb4_driver = {
 	.name     = KBUILD_MODNAME,
 	.id_table = cxgb4_pci_tbl,
 	.probe    = init_one,
-	.remove   = __devexit_p(remove_one),
+	.remove   = remove_one,
+	.shutdown = remove_one,
 	.err_handler = &cxgb4_eeh,
 };
 
@@ -3793,21 +6412,33 @@ static int __init cxgb4_init_module(void)
 {
 	int ret;
 
+	workq = create_singlethread_workqueue("cxgb4");
+	if (!workq)
+		return -ENOMEM;
+
 	/* Debugfs support is optional, just warn if this fails */
 	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
 	if (!cxgb4_debugfs_root)
-		pr_warning("could not create debugfs entry, continuing\n");
+		pr_warn("could not create debugfs entry, continuing\n");
 
 	ret = pci_register_driver(&cxgb4_driver);
-	if (ret < 0)
+	if (ret < 0) {
 		debugfs_remove(cxgb4_debugfs_root);
+		destroy_workqueue(workq);
+	}
+
+	register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
+
 	return ret;
 }
 
 static void __exit cxgb4_cleanup_module(void)
 {
+	unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
 	pci_unregister_driver(&cxgb4_driver);
 	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
+	flush_workqueue(workq);
+	destroy_workqueue(workq);
 }
 
 module_init(cxgb4_init_module);
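
Module init and exit are meant to mirror each other: the workqueue created first in cxgb4_init_module() is destroyed last in cxgb4_cleanup_module(), after the driver (the only producer of work items) has been unregistered and the queue drained. A reduced sketch of that pairing under assumed placeholder names (wq, example_driver); a real pci_driver would of course need its fields filled in:

	#include <linux/module.h>
	#include <linux/pci.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *wq;
	static struct pci_driver example_driver;	/* placeholder */

	static int __init example_init(void)
	{
		int ret;

		wq = create_singlethread_workqueue("example");
		if (!wq)
			return -ENOMEM;

		ret = pci_register_driver(&example_driver);
		if (ret) {
			destroy_workqueue(wq);	/* undo on failure */
			return ret;
		}
		return 0;
	}

	static void __exit example_exit(void)
	{
		pci_unregister_driver(&example_driver);
		flush_workqueue(wq);	/* let queued work drain */
		destroy_workqueue(wq);
	}

	module_init(example_init);
	module_exit(example_exit);
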
