author | Bryan O'Sullivan <bos@pathscale.com> | 2006-03-29 15:23:24 -0800
committer | Roland Dreier <rolandd@cisco.com> | 2006-03-31 13:14:18 -0800
commit | 7bb206e3b20477c8bcbbdf20834d456b0b6d82c4 (patch)
tree | 020464ec844664ebdcee40e05630751a20075afb /drivers/infiniband
parent | 064c94f9da8845f12446ab37142aa10f3c6f66ac (diff)
IB/ipath: core device driver
The ipath driver is a low-level driver for PathScale InfiniPath host
channel adapters (HCAs) based on the HT-400 and PE-800 chips, including
the InfiniPath HT-460, the small form factor InfiniPath HT-460, the
InfiniPath HT-470 and the Linux Networx LS/X.
The ipath_driver.c file contains much of the low-level device handling
code.
Signed-off-by: Bryan O'Sullivan <bos@pathscale.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r-- | drivers/infiniband/hw/ipath/ipath_driver.c | 1983 |
1 file changed, 1983 insertions(+), 0 deletions(-)
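The bulk of the patch below is PCI glue plus chip-independent receive and PIO-buffer handling. For orientation, here is a minimal, hypothetical skeleton of the registration path the file follows: a pci_device_id table for PathScale's vendor ID 0x1fc1 and the HT/PE-800 device IDs, a probe that enables the device, claims its regions, sets a 64-bit DMA mask and maps BAR 0, and a matching remove. The skel_* names and the trimmed-down devdata are illustrative only; the real ipath_init_one() additionally allocates per-unit devdata tracked in an IDR, recovers a zeroed BAR after a chip reset, installs the interrupt handler, and calls the chip-specific HT-400/PE-800 setup, as the diff shows. API names (pci_set_dma_mask, ioremap_nocache) mirror the 2006-era calls in the patch and have since been superseded in mainline.

```c
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/slab.h>

/* IDs copied from the patch; the "skel_*" names are illustrative only. */
#define PCI_VENDOR_ID_PATHSCALE		0x1fc1
#define PCI_DEVICE_ID_INFINIPATH_HT	0xd
#define PCI_DEVICE_ID_INFINIPATH_PE800	0x10

struct skel_devdata {
	void __iomem *kregbase;		/* chip register space (BAR 0) */
};

static const struct pci_device_id skel_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) },
	{ }
};
MODULE_DEVICE_TABLE(pci, skel_pci_tbl);

static int skel_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct skel_devdata *dd;
	int ret;

	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
	if (!dd)
		return -ENOMEM;

	ret = pci_enable_device(pdev);
	if (ret)
		goto bail_free;
	ret = pci_request_regions(pdev, "ipath_skel");
	if (ret)
		goto bail_disable;
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret)
		goto bail_regions;
	pci_set_master(pdev);

	/* Map the registers in BAR 0, as the driver does with ioremap_nocache(). */
	dd->kregbase = ioremap_nocache(pci_resource_start(pdev, 0),
				       pci_resource_len(pdev, 0));
	if (!dd->kregbase) {
		ret = -ENOMEM;
		goto bail_regions;
	}

	pci_set_drvdata(pdev, dd);
	return 0;

bail_regions:
	pci_release_regions(pdev);
bail_disable:
	pci_disable_device(pdev);
bail_free:
	kfree(dd);
	return ret;
}

static void skel_remove(struct pci_dev *pdev)
{
	struct skel_devdata *dd = pci_get_drvdata(pdev);

	iounmap(dd->kregbase);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(dd);
}

static struct pci_driver skel_driver = {
	.name     = "ipath_skel",
	.probe    = skel_probe,
	.remove   = skel_remove,
	.id_table = skel_pci_tbl,
};

static int __init skel_init(void)
{
	return pci_register_driver(&skel_driver);
}

static void __exit skel_exit(void)
{
	pci_unregister_driver(&skel_driver);
}

module_init(skel_init);
module_exit(skel_exit);
MODULE_LICENSE("Dual BSD/GPL");
```

The same enable/request/map sequence and the bail_* unwind labels appear in ipath_init_one() below; the sketch simply strips out the unit bookkeeping, write-combining setup, and layered-driver registration.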
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c new file mode 100644 index 00000000000..58a94efb007 --- /dev/null +++ b/drivers/infiniband/hw/ipath/ipath_driver.c @@ -0,0 +1,1983 @@ +/* + * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/spinlock.h> +#include <linux/idr.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/vmalloc.h> + +#include "ipath_kernel.h" +#include "ips_common.h" +#include "ipath_layer.h" + +static void ipath_update_pio_bufs(struct ipath_devdata *); + +const char *ipath_get_unit_name(int unit) +{ + static char iname[16]; + snprintf(iname, sizeof iname, "infinipath%u", unit); + return iname; +} + +EXPORT_SYMBOL_GPL(ipath_get_unit_name); + +#define DRIVER_LOAD_MSG "PathScale " IPATH_DRV_NAME " loaded: " +#define PFX IPATH_DRV_NAME ": " + +/* + * The size has to be longer than this string, so we can append + * board/chip information to it in the init code. + */ +const char ipath_core_version[] = IPATH_IDSTR "\n"; + +static struct idr unit_table; +DEFINE_SPINLOCK(ipath_devs_lock); +LIST_HEAD(ipath_dev_list); + +wait_queue_head_t ipath_sma_state_wait; + +unsigned ipath_debug = __IPATH_INFO; + +module_param_named(debug, ipath_debug, uint, S_IWUSR | S_IRUGO); +MODULE_PARM_DESC(debug, "mask for debug prints"); +EXPORT_SYMBOL_GPL(ipath_debug); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("PathScale <support@pathscale.com>"); +MODULE_DESCRIPTION("Pathscale InfiniPath driver"); + +const char *ipath_ibcstatus_str[] = { + "Disabled", + "LinkUp", + "PollActive", + "PollQuiet", + "SleepDelay", + "SleepQuiet", + "LState6", /* unused */ + "LState7", /* unused */ + "CfgDebounce", + "CfgRcvfCfg", + "CfgWaitRmt", + "CfgIdle", + "RecovRetrain", + "LState0xD", /* unused */ + "RecovWaitRmt", + "RecovIdle", +}; + +/* + * These variables are initialized in the chip-specific files + * but are defined here. 
+ */ +u16 ipath_gpio_sda_num, ipath_gpio_scl_num; +u64 ipath_gpio_sda, ipath_gpio_scl; +u64 infinipath_i_bitsextant; +ipath_err_t infinipath_e_bitsextant, infinipath_hwe_bitsextant; +u32 infinipath_i_rcvavail_mask, infinipath_i_rcvurg_mask; + +static void __devexit ipath_remove_one(struct pci_dev *); +static int __devinit ipath_init_one(struct pci_dev *, + const struct pci_device_id *); + +/* Only needed for registration, nothing else needs this info */ +#define PCI_VENDOR_ID_PATHSCALE 0x1fc1 +#define PCI_DEVICE_ID_INFINIPATH_HT 0xd +#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10 + +static const struct pci_device_id ipath_pci_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, + PCI_DEVICE_ID_INFINIPATH_HT)}, + {PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, + PCI_DEVICE_ID_INFINIPATH_PE800)}, +}; + +MODULE_DEVICE_TABLE(pci, ipath_pci_tbl); + +static struct pci_driver ipath_driver = { + .name = IPATH_DRV_NAME, + .probe = ipath_init_one, + .remove = __devexit_p(ipath_remove_one), + .id_table = ipath_pci_tbl, +}; + +/* + * This is where port 0's rcvhdrtail register is written back; we also + * want nothing else sharing the cache line, so make it a cache line + * in size. Used for all units. + */ +volatile __le64 *ipath_port0_rcvhdrtail; +dma_addr_t ipath_port0_rcvhdrtail_dma; +static int port0_rcvhdrtail_refs; + +static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev, + u32 *bar0, u32 *bar1) +{ + int ret; + + ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, bar0); + if (ret) + ipath_dev_err(dd, "failed to read bar0 before enable: " + "error %d\n", -ret); + + ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, bar1); + if (ret) + ipath_dev_err(dd, "failed to read bar1 before enable: " + "error %d\n", -ret); + + ipath_dbg("Read bar0 %x bar1 %x\n", *bar0, *bar1); +} + +static void ipath_free_devdata(struct pci_dev *pdev, + struct ipath_devdata *dd) +{ + unsigned long flags; + + pci_set_drvdata(pdev, NULL); + + if (dd->ipath_unit != -1) { + spin_lock_irqsave(&ipath_devs_lock, flags); + idr_remove(&unit_table, dd->ipath_unit); + list_del(&dd->ipath_list); + spin_unlock_irqrestore(&ipath_devs_lock, flags); + } + dma_free_coherent(&pdev->dev, sizeof(*dd), dd, dd->ipath_dma_addr); +} + +static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev) +{ + unsigned long flags; + struct ipath_devdata *dd; + dma_addr_t dma_addr; + int ret; + + if (!idr_pre_get(&unit_table, GFP_KERNEL)) { + dd = ERR_PTR(-ENOMEM); + goto bail; + } + + dd = dma_alloc_coherent(&pdev->dev, sizeof(*dd), &dma_addr, + GFP_KERNEL); + + if (!dd) { + dd = ERR_PTR(-ENOMEM); + goto bail; + } + + dd->ipath_dma_addr = dma_addr; + dd->ipath_unit = -1; + + spin_lock_irqsave(&ipath_devs_lock, flags); + + ret = idr_get_new(&unit_table, dd, &dd->ipath_unit); + if (ret < 0) { + printk(KERN_ERR IPATH_DRV_NAME + ": Could not allocate unit ID: error %d\n", -ret); + ipath_free_devdata(pdev, dd); + dd = ERR_PTR(ret); + goto bail_unlock; + } + + dd->pcidev = pdev; + pci_set_drvdata(pdev, dd); + + list_add(&dd->ipath_list, &ipath_dev_list); + +bail_unlock: + spin_unlock_irqrestore(&ipath_devs_lock, flags); + +bail: + return dd; +} + +static inline struct ipath_devdata *__ipath_lookup(int unit) +{ + return idr_find(&unit_table, unit); +} + +struct ipath_devdata *ipath_lookup(int unit) +{ + struct ipath_devdata *dd; + unsigned long flags; + + spin_lock_irqsave(&ipath_devs_lock, flags); + dd = __ipath_lookup(unit); + spin_unlock_irqrestore(&ipath_devs_lock, flags); + + return dd; +} + +int ipath_count_units(int *npresentp, int *nupp, 
u32 *maxportsp) +{ + int nunits, npresent, nup; + struct ipath_devdata *dd; + unsigned long flags; + u32 maxports; + + nunits = npresent = nup = maxports = 0; + + spin_lock_irqsave(&ipath_devs_lock, flags); + + list_for_each_entry(dd, &ipath_dev_list, ipath_list) { + nunits++; + if ((dd->ipath_flags & IPATH_PRESENT) && dd->ipath_kregbase) + npresent++; + if (dd->ipath_lid && + !(dd->ipath_flags & (IPATH_DISABLED | IPATH_LINKDOWN + | IPATH_LINKUNK))) + nup++; + if (dd->ipath_cfgports > maxports) + maxports = dd->ipath_cfgports; + } + + spin_unlock_irqrestore(&ipath_devs_lock, flags); + + if (npresentp) + *npresentp = npresent; + if (nupp) + *nupp = nup; + if (maxportsp) + *maxportsp = maxports; + + return nunits; +} + +static int init_port0_rcvhdrtail(struct pci_dev *pdev) +{ + int ret; + + mutex_lock(&ipath_mutex); + + if (!ipath_port0_rcvhdrtail) { + ipath_port0_rcvhdrtail = + dma_alloc_coherent(&pdev->dev, + IPATH_PORT0_RCVHDRTAIL_SIZE, + &ipath_port0_rcvhdrtail_dma, + GFP_KERNEL); + + if (!ipath_port0_rcvhdrtail) { + ret = -ENOMEM; + goto bail; + } + } + port0_rcvhdrtail_refs++; + ret = 0; + +bail: + mutex_unlock(&ipath_mutex); + + return ret; +} + +static void cleanup_port0_rcvhdrtail(struct pci_dev *pdev) +{ + mutex_lock(&ipath_mutex); + + if (!--port0_rcvhdrtail_refs) { + dma_free_coherent(&pdev->dev, IPATH_PORT0_RCVHDRTAIL_SIZE, + (void *) ipath_port0_rcvhdrtail, + ipath_port0_rcvhdrtail_dma); + ipath_port0_rcvhdrtail = NULL; + } + + mutex_unlock(&ipath_mutex); +} + +/* + * These next two routines are placeholders in case we don't have per-arch + * code for controlling write combining. If explicit control of write + * combining is not available, performance will probably be awful. + */ + +int __attribute__((weak)) ipath_enable_wc(struct ipath_devdata *dd) +{ + return -EOPNOTSUPP; +} + +void __attribute__((weak)) ipath_disable_wc(struct ipath_devdata *dd) +{ +} + +static int __devinit ipath_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int ret, len, j; + struct ipath_devdata *dd; + unsigned long long addr; + u32 bar0 = 0, bar1 = 0; + u8 rev; + + ret = init_port0_rcvhdrtail(pdev); + if (ret < 0) { + printk(KERN_ERR IPATH_DRV_NAME + ": Could not allocate port0_rcvhdrtail: error %d\n", + -ret); + goto bail; + } + + dd = ipath_alloc_devdata(pdev); + if (IS_ERR(dd)) { + ret = PTR_ERR(dd); + printk(KERN_ERR IPATH_DRV_NAME + ": Could not allocate devdata: error %d\n", -ret); + goto bail_rcvhdrtail; + } + + ipath_cdbg(VERBOSE, "initializing unit #%u\n", dd->ipath_unit); + + read_bars(dd, pdev, &bar0, &bar1); + + ret = pci_enable_device(pdev); + if (ret) { + /* This can happen iff: + * + * We did a chip reset, and then failed to reprogram the + * BAR, or the chip reset due to an internal error. We then + * unloaded the driver and reloaded it. + * + * Both reset cases set the BAR back to initial state. For + * the latter case, the AER sticky error bit at offset 0x718 + * should be set, but the Linux kernel doesn't yet know + * about that, it appears. If the original BAR was retained + * in the kernel data structures, this may be OK. 
+ */ + ipath_dev_err(dd, "enable unit %d failed: error %d\n", + dd->ipath_unit, -ret); + goto bail_devdata; + } + addr = pci_resource_start(pdev, 0); + len = pci_resource_len(pdev, 0); + ipath_cdbg(VERBOSE, "regbase (0) %llx len %d irq %x, vend %x/%x " + "driver_data %lx\n", addr, len, pdev->irq, ent->vendor, + ent->device, ent->driver_data); + + read_bars(dd, pdev, &bar0, &bar1); + + if (!bar1 && !(bar0 & ~0xf)) { + if (addr) { + dev_info(&pdev->dev, "BAR is 0 (probable RESET), " + "rewriting as %llx\n", addr); + ret = pci_write_config_dword( + pdev, PCI_BASE_ADDRESS_0, addr); + if (ret) { + ipath_dev_err(dd, "rewrite of BAR0 " + "failed: err %d\n", -ret); + goto bail_disable; + } + ret = pci_write_config_dword( + pdev, PCI_BASE_ADDRESS_1, addr >> 32); + if (ret) { + ipath_dev_err(dd, "rewrite of BAR1 " + "failed: err %d\n", -ret); + goto bail_disable; + } + } else { + ipath_dev_err(dd, "BAR is 0 (probable RESET), " + "not usable until reboot\n"); + ret = -ENODEV; + goto bail_disable; + } + } + + ret = pci_request_regions(pdev, IPATH_DRV_NAME); + if (ret) { + dev_info(&pdev->dev, "pci_request_regions unit %u fails: " + "err %d\n", dd->ipath_unit, -ret); + goto bail_disable; + } + + ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK); + if (ret) { + dev_info(&pdev->dev, "pci_set_dma_mask unit %u " + "fails: %d\n", dd->ipath_unit, ret); + goto bail_regions; + } + + pci_set_master(pdev); + + /* + * Save BARs to rewrite after device reset. Save all 64 bits of + * BAR, just in case. + */ + dd->ipath_pcibar0 = addr; + dd->ipath_pcibar1 = addr >> 32; + dd->ipath_deviceid = ent->device; /* save for later use */ + dd->ipath_vendorid = ent->vendor; + + /* setup the chip-specific functions, as early as possible. */ + switch (ent->device) { + case PCI_DEVICE_ID_INFINIPATH_HT: + ipath_init_ht400_funcs(dd); + break; + case PCI_DEVICE_ID_INFINIPATH_PE800: + ipath_init_pe800_funcs(dd); + break; + default: + ipath_dev_err(dd, "Found unknown PathScale deviceid 0x%x, " + "failing\n", ent->device); + return -ENODEV; + } + + for (j = 0; j < 6; j++) { + if (!pdev->resource[j].start) + continue; + ipath_cdbg(VERBOSE, "BAR %d start %lx, end %lx, len %lx\n", + j, pdev->resource[j].start, + pdev->resource[j].end, + pci_resource_len(pdev, j)); + } + + if (!addr) { + ipath_dev_err(dd, "No valid address in BAR 0!\n"); + ret = -ENODEV; + goto bail_regions; + } + + dd->ipath_deviceid = ent->device; /* save for later use */ + dd->ipath_vendorid = ent->vendor; + + ret = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); + if (ret) { + ipath_dev_err(dd, "Failed to read PCI revision ID unit " + "%u: err %d\n", dd->ipath_unit, -ret); + goto bail_regions; /* shouldn't ever happen */ + } + dd->ipath_pcirev = rev; + + dd->ipath_kregbase = ioremap_nocache(addr, len); + + if (!dd->ipath_kregbase) { + ipath_dbg("Unable to map io addr %llx to kvirt, failing\n", + addr); + ret = -ENOMEM; + goto bail_iounmap; + } + dd->ipath_kregend = (u64 __iomem *) + ((void __iomem *)dd->ipath_kregbase + len); + dd->ipath_physaddr = addr; /* used for io_remap, etc. */ + /* for user mmap */ + dd->ipath_kregvirt = (u64 __iomem *) phys_to_virt(addr); + ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p " + "kregvirt %p\n", addr, dd->ipath_kregbase, + dd->ipath_kregvirt); + + /* + * clear ipath_flags here instead of in ipath_init_chip as it is set + * by ipath_setup_htconfig. 
+ */ + dd->ipath_flags = 0; + + if (dd->ipath_f_bus(dd, pdev)) + ipath_dev_err(dd, "Failed to setup config space; " + "continuing anyway\n"); + + /* + * set up our interrupt handler; SA_SHIRQ probably not needed, + * since MSI interrupts shouldn't be shared but won't hurt for now. + * check 0 irq after we return from chip-specific bus setup, since + * that can affect this due to setup + */ + if (!pdev->irq) + ipath_dev_err(dd, "irq is 0, BIOS error? Interrupts won't " + "work\n"); + else { + ret = request_irq(pdev->irq, ipath_intr, SA_SHIRQ, + IPATH_DRV_NAME, dd); + if (ret) { + ipath_dev_err(dd, "Couldn't setup irq handler, " + "irq=%u: %d\n", pdev->irq, ret); + goto bail_iounmap; + } + } + + ret = ipath_init_chip(dd, 0); /* do the chip-specific init */ + if (ret) + goto bail_iounmap; + + ret = ipath_enable_wc(dd); + + if (ret) { + ipath_dev_err(dd, "Write combining not enabled " + "(err %d): performance may be poor\n", + -ret); + ret = 0; + } + + ipath_device_create_group(&pdev->dev, dd); + ipathfs_add_device(dd); + ipath_user_add(dd); + ipath_layer_add(dd); + + goto bail; + +bail_iounmap: + iounmap((volatile void __iomem *) dd->ipath_kregbase); + +bail_regions: + pci_release_regions(pdev); + +bail_disable: + pci_disable_device(pdev); + +bail_devdata: + ipath_free_devdata(pdev, dd); + +bail_rcvhdrtail: + cleanup_port0_rcvhdrtail(pdev); + +bail: + return ret; +} + +static void __devexit ipath_remove_one(struct pci_dev *pdev) +{ + struct ipath_devdata *dd; + + ipath_cdbg(VERBOSE, "removing, pdev=%p\n", pdev); + if (!pdev) + return; + + dd = pci_get_drvdata(pdev); + ipath_layer_del(dd); + ipath_user_del(dd); + ipathfs_remove_device(dd); + ipath_device_remove_group(&pdev->dev, dd); + ipath_cdbg(VERBOSE, "Releasing pci memory regions, dd %p, " + "unit %u\n", dd, (u32) dd->ipath_unit); + if (dd->ipath_kregbase) { + ipath_cdbg(VERBOSE, "Unmapping kregbase %p\n", + dd->ipath_kregbase); + iounmap((volatile void __iomem *) dd->ipath_kregbase); + dd->ipath_kregbase = NULL; + } + pci_release_regions(pdev); + ipath_cdbg(VERBOSE, "calling pci_disable_device\n"); + pci_disable_device(pdev); + + ipath_free_devdata(pdev, dd); + cleanup_port0_rcvhdrtail(pdev); +} + +/* general driver use */ +DEFINE_MUTEX(ipath_mutex); + +static DEFINE_SPINLOCK(ipath_pioavail_lock); + +/** + * ipath_disarm_piobufs - cancel a range of PIO buffers + * @dd: the infinipath device + * @first: the first PIO buffer to cancel + * @cnt: the number of PIO buffers to cancel + * + * cancel a range of PIO buffers, used when they might be armed, but + * not triggered. Used at init to ensure buffer state, and also user + * process close, in case it died while writing to a PIO buffer + * Also after errors. + */ +void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first, + unsigned cnt) +{ + unsigned i, last = first + cnt; + u64 sendctrl, sendorig; + + ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first); + sendorig = dd->ipath_sendctrl | INFINIPATH_S_DISARM; + for (i = first; i < last; i++) { + sendctrl = sendorig | + (i << INFINIPATH_S_DISARMPIOBUF_SHIFT); + ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, + sendctrl); + } + + /* + * Write it again with current value, in case ipath_sendctrl changed + * while we were looping; no critical bits that would require + * locking. + * + * Write a 0, and then the original value, reading scratch in + * between. This seems to avoid a chip timing race that causes + * pioavail updates to memory to stop. 
+ */ + ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, + 0); + sendorig = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); + ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, + dd->ipath_sendctrl); +} + +/** + * ipath_wait_linkstate - wait for an IB link state change to occur + * @dd: the infinipath device + * @state: the state to wait for + * @msecs: the number of milliseconds to wait + * + * wait up to msecs milliseconds for IB link state change to occur for + * now, take the easy polling route. Currently used only by + * ipath_layer_set_linkstate. Returns 0 if state reached, otherwise + * -ETIMEDOUT state can have multiple states set, for any of several + * transitions. + */ +int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, int msecs) +{ + dd->ipath_sma_state_wanted = state; + wait_event_interruptible_timeout(ipath_sma_state_wait, + (dd->ipath_flags & state), + msecs_to_jiffies(msecs)); + dd->ipath_sma_state_wanted = 0; + + if (!(dd->ipath_flags & state)) { + u64 val; + ipath_cdbg(SMA, "Didn't reach linkstate %s within %u ms\n", + /* test INIT ahead of DOWN, both can be set */ + (state & IPATH_LINKINIT) ? "INIT" : + ((state & IPATH_LINKDOWN) ? "DOWN" : + ((state & IPATH_LINKARMED) ? "ARM" : "ACTIVE")), + msecs); + val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus); + ipath_cdbg(VERBOSE, "ibcc=%llx ibcstatus=%llx (%s)\n", + (unsigned long long) ipath_read_kreg64( + dd, dd->ipath_kregs->kr_ibcctrl), + (unsigned long long) val, + ipath_ibcstatus_str[val & 0xf]); + } + return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT; +} + +void ipath_decode_err(char *buf, size_t blen, ipath_err_t err) +{ + *buf = '\0'; + if (err & INFINIPATH_E_RHDRLEN) + strlcat(buf, "rhdrlen ", blen); + if (err & INFINIPATH_E_RBADTID) + strlcat(buf, "rbadtid ", blen); + if (err & INFINIPATH_E_RBADVERSION) + strlcat(buf, "rbadversion ", blen); + if (err & INFINIPATH_E_RHDR) + strlcat(buf, "rhdr ", blen); + if (err & INFINIPATH_E_RLONGPKTLEN) + strlcat(buf, "rlongpktlen ", blen); + if (err & INFINIPATH_E_RSHORTPKTLEN) + strlcat(buf, "rshortpktlen ", blen); + if (err & INFINIPATH_E_RMAXPKTLEN) + strlcat(buf, "rmaxpktlen ", blen); + if (err & INFINIPATH_E_RMINPKTLEN) + strlcat(buf, "rminpktlen ", blen); + if (err & INFINIPATH_E_RFORMATERR) + strlcat(buf, "rformaterr ", blen); + if (err & INFINIPATH_E_RUNSUPVL) + strlcat(buf, "runsupvl ", blen); + if (err & INFINIPATH_E_RUNEXPCHAR) + strlcat(buf, "runexpchar ", blen); + if (err & INFINIPATH_E_RIBFLOW) + strlcat(buf, "ribflow ", blen); + if (err & INFINIPATH_E_REBP) + strlcat(buf, "EBP ", blen); + if (err & INFINIPATH_E_SUNDERRUN) + strlcat(buf, "sunderrun ", blen); + if (err & INFINIPATH_E_SPIOARMLAUNCH) + strlcat(buf, "spioarmlaunch ", blen); + if (err & INFINIPATH_E_SUNEXPERRPKTNUM) + strlcat(buf, "sunexperrpktnum ", blen); + if (err & INFINIPATH_E_SDROPPEDDATAPKT) + strlcat(buf, "sdroppeddatapkt ", blen); + if (err & INFINIPATH_E_SDROPPEDSMPPKT) + strlcat(buf, "sdroppedsmppkt ", blen); + if (err & INFINIPATH_E_SMAXPKTLEN) + strlcat(buf, "smaxpktlen ", blen); + if (err & INFINIPATH_E_SMINPKTLEN) + strlcat(buf, "sminpktlen ", blen); + if (err & INFINIPATH_E_SUNSUPVL) + strlcat(buf, "sunsupVL ", blen); + if (err & INFINIPATH_E_SPKTLEN) + strlcat(buf, "spktlen ", blen); + if (err & INFINIPATH_E_INVALIDADDR) + strlcat(buf, "invalidaddr ", blen); + if (err & INFINIPATH_E_RICRC) + strlcat(buf, "CRC ", blen); + if (err & INFINIPATH_E_RVCRC) + strlcat(buf, "VCRC ", blen); + if (err & INFINIPATH_E_RRCVEGRFULL) + strlcat(buf, "rcvegrfull ", blen); + if 
(err & INFINIPATH_E_RRCVHDRFULL) + strlcat(buf, "rcvhdrfull ", blen); + if (err & INFINIPATH_E_IBSTATUSCHANGED) + strlcat(buf, "ibcstatuschg ", blen); + if (err & INFINIPATH_E_RIBLOSTLINK) + strlcat(buf, "riblostlink ", blen); + if (err & INFINIPATH_E_HARDWARE) + strlcat(buf, "hardware ", blen); + if (err & INFINIPATH_E_RESET) + strlcat(buf, "reset ", blen); +} + +/** + * get_rhf_errstring - decode RHF errors + * @err: the err number + * @msg: the output buffer + * @len: the length of the output buffer + * + * only used one place now, may want more later + */ +static void get_rhf_errstring(u32 err, char *msg, size_t len) +{ + /* if no errors, and so don't need to check what's first */ + *msg = '\0'; + + if (err & INFINIPATH_RHF_H_ICRCERR) + strlcat(msg, "icrcerr ", len); + if (err & INFINIPATH_RHF_H_VCRCERR) + strlcat(msg, "vcrcerr ", len); + if (err & INFINIPATH_RHF_H_PARITYERR) + strlcat(msg, "parityerr ", len); + if (err & INFINIPATH_RHF_H_LENERR) + strlcat(msg, "lenerr ", len); + if (err & INFINIPATH_RHF_H_MTUERR) + strlcat(msg, "mtuerr ", len); + if (err & INFINIPATH_RHF_H_IHDRERR) + /* infinipath hdr checksum error */ + strlcat(msg, "ipathhdrerr ", len); + if (err & INFINIPATH_RHF_H_TIDERR) + strlcat(msg, "tiderr ", len); + if (err & INFINIPATH_RHF_H_MKERR) + /* bad port, offset, etc. */ + strlcat(msg, "invalid ipathhdr ", len); + if (err & INFINIPATH_RHF_H_IBERR) + strlcat(msg, "iberr ", len); + if (err & INFINIPATH_RHF_L_SWA) + strlcat(msg, "swA ", len); + if (err & INFINIPATH_RHF_L_SWB) + strlcat(msg, "swB ", len); +} + +/** + * ipath_get_egrbuf - get an eager buffer + * @dd: the infinipath device + * @bufnum: the eager buffer to get + * @err: unused + * + * must only be called if ipath_pd[port] is known to be allocated + */ +static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum, + int err) +{ + return dd->ipath_port0_skbs ? + (void *)dd->ipath_port0_skbs[bufnum]->data : NULL; +} + +/** + * ipath_alloc_skb - allocate an skb and buffer with possible constraints + * @dd: the infinipath device + * @gfp_mask: the sk_buff SFP mask + */ +struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd, + gfp_t gfp_mask) +{ + struct sk_buff *skb; + u32 len; + + /* + * Only fully supported way to handle this is to allocate lots + * extra, align as needed, and then do skb_reserve(). That wastes + * a lot of memory... I'll have to hack this into infinipath_copy + * also. + */ + + /* + * We need 4 extra bytes for unaligned transfer copying + */ + if (dd->ipath_flags & IPATH_4BYTE_TID) { + /* we need a 4KB multiple alignment, and there is no way + * to do it except to allocate extra and then skb_reserve + * enough to bring it up to the right alignment. 
+ */ + len = dd->ipath_ibmaxlen + 4 + (1 << 11) - 1; + } + else + len = dd->ipath_ibmaxlen + 4; + skb = __dev_alloc_skb(len, gfp_mask); + if (!skb) { + ipath_dev_err(dd, "Failed to allocate skbuff, length %u\n", + len); + goto bail; + } + if (dd->ipath_flags & IPATH_4BYTE_TID) { + u32 una = ((1 << 11) - 1) & (unsigned long)(skb->data + 4); + if (una) + skb_reserve(skb, 4 + (1 << 11) - una); + else + skb_reserve(skb, 4); + } else + skb_reserve(skb, 4); + +bail: + return skb; +} + +/** + * ipath_rcv_layer - receive a packet for the layered (ethernet) driver + * @dd: the infinipath device + * @etail: the sk_buff number + * @tlen: the total packet length + * @hdr: the ethernet header + * + * Separate routine for better overall optimization + */ +static void ipath_rcv_layer(struct ipath_devdata *dd, u32 etail, + u32 tlen, struct ether_header *hdr) +{ + u32 elen; + u8 pad, *bthbytes; + struct sk_buff *skb, *nskb; + + if (dd->ipath_port0_skbs && hdr->sub_opcode == OPCODE_ENCAP) { + /* + * Allocate a new sk_buff to replace the one we give + * to the network stack. + */ + nskb = ipath_alloc_skb(dd, GFP_ATOMIC); + if (!nskb) { + /* count OK packets that we drop */ + ipath_stats.sps_krdrops++; + return; + } + + bthbytes = (u8 *) hdr->bth; + pad = (bthbytes[1] >> 4) & 3; + /* +CRC32 */ + elen = tlen - (sizeof(*hdr) + pad + sizeof(u32)); + + skb = dd->ipath_port0_skbs[etail]; + dd->ipath_port0_skbs[etail] = nskb; + skb_put(skb, elen); + + dd->ipath_f_put_tid(dd, etail + (u64 __iomem *) + ((char __iomem *) dd->ipath_kregbase + + dd->ipath_rcvegrbase), 0, + virt_to_phys(nskb->data)); + + __ipath_layer_rcv(dd, hdr, skb); + + /* another ether packet received */ + ipath_stats.sps_ether_rpkts++; + } + else if (hdr->sub_opcode == OPCODE_LID_ARP) + __ipath_layer_rcv_lid(dd, hdr); +} + +/* + * ipath_kreceive - receive a packet + * @dd: the infinipath device + * + * called from interrupt handler for errors or receive interrupt + */ +void ipath_kreceive(struct ipath_devdata *dd) +{ + u64 *rc; + void *ebuf; + const u32 rsize = dd->ipath_rcvhdrentsize; /* words */ + const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize; /* words */ + u32 etail = -1, l, hdrqtail; + struct ips_message_header *hdr; + u32 eflags, i, etype, tlen, pkttot = 0; + static u64 totcalls; /* stats, may eventually remove */ + char emsg[128]; + + if (!dd->ipath_hdrqtailptr) { + ipath_dev_err(dd, + "hdrqtailptr not set, can't do receives\n"); + goto bail; + } + + /* There is already a thread processing this queue. */ + if (test_and_set_bit(0, &dd->ipath_rcv_pending)) + goto bail; + + if (dd->ipath_port0head == + (u32)le64_to_cpu(*dd->ipath_hdrqtailptr)) + goto done; + +gotmore: + /* + * read only once at start. If in flood situation, this helps + * performance slightly. If more arrive while we are processing, + * we'll come back here and do them + */ + hdrqtail = (u32)le64_to_cpu(*dd->ipath_hdrqtailptr); + + for (i = 0, l = dd->ipath_port0head; l != hdrqtail; i++) { + u32 qp; + u8 *bthbytes; + + rc = (u64 *) (dd->ipath_pd[0]->port_rcvhdrq + (l << 2)); + hdr = (struct ips_message_header *)&rc[1]; + /* + * could make a network order version of IPATH_KD_QP, and + * do the obvious shift before masking to speed this up. 
+ */ + qp = ntohl(hdr->bth[1]) & 0xffffff; + bthbytes = (u8 *) hdr->bth; + + eflags = ips_get_hdr_err_flags((__le32 *) rc); + etype = ips_get_rcv_type((__le32 *) rc); + /* total length */ + tlen = ips_get_length_in_bytes((__le32 *) rc); + ebuf = NULL; + if (etype != RCVHQ_RCV_TYPE_EXPECTED) { + /* + * it turns out that the chips uses an eager buffer + * for all non-expected packets, whether it "needs" + * one or not. So always get the index, but don't + * set ebuf (so we try to copy data) unless the + * length requires it. + */ + etail = ips_get_index((__le32 *) rc); + if (tlen > sizeof(*hdr) || + etype == RCVHQ_RCV_TYPE_NON_KD) + ebuf = ipath_get_egrbuf(dd, etail, 0); + } + + /* + * both tiderr and ipathhdrerr are set for all plain IB + * packets; only ipathhdrerr should be set. + */ + + if (etype != RCVHQ_RCV_TYPE_NON_KD && etype != + RCVHQ_RCV_TYPE_ERROR && ips_get_ipath_ver( + hdr->iph.ver_port_tid_offset) != + IPS_PROTO_VERSION) { + ipath_cdbg(PKT, "Bad InfiniPath protocol version " + "%x\n", etype); + } + + if (eflags & ~(INFINIPATH_RHF_H_TIDERR | + INFINIPATH_RHF_H_IHDRERR)) { + get_rhf_errstring(eflags, emsg, sizeof emsg); + ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u " + "tlen=%x opcode=%x egridx=%x: %s\n", + eflags, l, etype, tlen, bthbytes[0], + ips_get_index((__le32 *) rc), emsg); + } else if (etype == RCVHQ_RCV_TYPE_NON_KD) { + int ret = __ipath_verbs_rcv(dd, rc + 1, + ebuf, tlen); + if (ret == -ENODEV) + ipath_cdbg(VERBOSE, + "received IB packet, " + "not SMA (QP=%x)\n", qp); + } else if (etype == RCVHQ_RCV_TYPE_EAGER) { + if (qp == IPATH_KD_QP && + bthbytes[0] == ipath_layer_rcv_opcode && + ebuf) + ipath_rcv_layer(dd, etail, tlen, + (struct ether_header *)hdr); + else + ipath_cdbg(PKT, "typ %x, opcode %x (eager, " + "qp=%x), len %x; ignored\n", + etype, bthbytes[0], qp, tlen); + } + else if (etype == RCVHQ_RCV_TYPE_EXPECTED) + ipath_dbg("Bug: Expected TID, opcode %x; ignored\n", + be32_to_cpu(hdr->bth[0]) & 0xff); + else if (eflags & (INFINIPATH_RHF_H_TIDERR | + INFINIPATH_RHF_H_IHDRERR)) { + /* + * This is a type 3 packet, only the LRH is in the + * rcvhdrq, the rest of the header is in the eager + * buffer. + */ + u8 opcode; + if (ebuf) { + bthbytes = (u8 *) ebuf; + opcode = *bthbytes; + } + else + opcode = 0; + get_rhf_errstring(eflags, emsg, sizeof emsg); + ipath_dbg("Err %x (%s), opcode %x, egrbuf %x, " + "len %x\n", eflags, emsg, opcode, etail, + tlen); + } else { + /* + * error packet, type of error unknown. + * Probably type 3, but we don't know, so don't + * even try to print the opcode, etc. + */ + ipath_dbg("Error Pkt, but no eflags! egrbuf %x, " + "len %x\nhdrq@%lx;hdrq+%x rhf: %llx; " + "hdr %llx %llx %llx %llx %llx\n", + etail, tlen, (unsigned long) rc, l, + (unsigned long long) rc[0], + (unsigned long long) rc[1], + (unsigned long long) rc[2], + (unsigned long long) rc[3], + (unsigned long long) rc[4], + (unsigned long long) rc[5]); + } + l += rsize; + if (l >= maxcnt) + l = 0; + /* + * update for each packet, to help prevent overflows if we + * have lots of packets. 
+ */ + (void)ipath_write_ureg(dd, ur_rcvhdrhead, + dd->ipath_rhdrhead_intr_off | l, 0); + if (etype != RCVHQ_RCV_TYPE_EXPECTED) + (void)ipath_write_ureg(dd, ur_rcvegrindexhead, + etail, 0); + } + + pkttot += i; + + dd->ipath_port0head = l; + + if (hdrqtail != (u32)le64_to_cpu(*dd->ipath_hdrqtailptr)) + /* more arrived while we handled first batch */ + goto gotmore; + + if (pkttot > ipath_stats.sps_maxpkts_call) + ipath_stats.sps_maxpkts_call = pkttot; + ipath_stats.sps_port0pkts += pkttot; + ipath_stats.sps_avgpkts_call = + ipath_stats.sps_port0pkts / ++totcalls; + +done: + clear_bit(0, &dd->ipath_rcv_pending); + smp_mb__after_clear_bit(); + +bail:; +} + +/** + * ipath_update_pio_bufs - update shadow copy of the PIO availability map + * @dd: the infinipath device + * + * called whenever our local copy indicates we have run out of send buffers + * NOTE: This can be called from interrupt context by some code + * and from non-interrupt context by ipath_getpiobuf(). + */ + +static void ipath_update_pio_bufs(struct ipath_devdata *dd) +{ + unsigned long flags; + int i; + const unsigned piobregs = (unsigned)dd->ipath_pioavregs; + + /* If the generation (check) bits have changed, then we update the + * busy bit for the corresponding PIO buffer. This algorithm will + * modify positions to the value they already have in some cases + * (i.e., no change), but it's faster than changing only the bits + * that have changed. + * + * We would like to do this atomicly, to avoid spinlocks in the + * critical send path, but that's not really possible, given the + * type of changes, and that this routine could be called on + * multiple cpu's simultaneously, so we lock in this routine only, + * to avoid conflicting updates; all we change is the shadow, and + * it's a single 64 bit memory location, so by definition the update + * is atomic in terms of what other cpu's can see in testing the + * bits. The spin_lock overhead isn't too bad, since it only + * happens when all buffers are in use, so only cpu overhead, not + * latency or bandwidth is affected. + */ +#define _IPATH_ALL_CHECKBITS 0x5555555555555555ULL + if (!dd->ipath_pioavailregs_dma) { + ipath_dbg("Update shadow pioavail, but regs_dma NULL!\n"); + return; + } + if (ipath_debug & __IPATH_VERBDBG) { + /* only if packet debug and verbose */ |