Diffstat (limited to 'drivers/net/ethernet/sfc/efx.c')
-rw-r--r-- | drivers/net/ethernet/sfc/efx.c | 1082
1 file changed, 719 insertions, 363 deletions
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 4f86d0cd516..1e274045970 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1,7 +1,7 @@ /**************************************************************************** - * Driver for Solarflare Solarstorm network controllers and boards + * Driver for Solarflare network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2005-2011 Solarflare Communications Inc. + * Copyright 2005-2013 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -17,11 +17,11 @@ #include <linux/ip.h> #include <linux/tcp.h> #include <linux/in.h> -#include <linux/crc32.h> #include <linux/ethtool.h> #include <linux/topology.h> #include <linux/gfp.h> -#include <linux/cpu_rmap.h> +#include <linux/aer.h> +#include <linux/interrupt.h> #include "net_driver.h" #include "efx.h" #include "nic.h" @@ -71,27 +71,34 @@ const char *const efx_loopback_mode_names[] = { const unsigned int efx_reset_type_max = RESET_TYPE_MAX; const char *const efx_reset_type_names[] = { - [RESET_TYPE_INVISIBLE] = "INVISIBLE", - [RESET_TYPE_ALL] = "ALL", - [RESET_TYPE_WORLD] = "WORLD", - [RESET_TYPE_DISABLE] = "DISABLE", - [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG", - [RESET_TYPE_INT_ERROR] = "INT_ERROR", - [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY", - [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH", - [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH", - [RESET_TYPE_TX_SKIP] = "TX_SKIP", - [RESET_TYPE_MC_FAILURE] = "MC_FAILURE", + [RESET_TYPE_INVISIBLE] = "INVISIBLE", + [RESET_TYPE_ALL] = "ALL", + [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL", + [RESET_TYPE_WORLD] = "WORLD", + [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE", + [RESET_TYPE_MC_BIST] = "MC_BIST", + [RESET_TYPE_DISABLE] = "DISABLE", + [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG", + [RESET_TYPE_INT_ERROR] = "INT_ERROR", + [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY", + [RESET_TYPE_DMA_ERROR] = "DMA_ERROR", + [RESET_TYPE_TX_SKIP] = "TX_SKIP", + [RESET_TYPE_MC_FAILURE] = "MC_FAILURE", + [RESET_TYPE_MCDI_TIMEOUT] = "MCDI_TIMEOUT (FLR)", }; -#define EFX_MAX_MTU (9 * 1024) - /* Reset workqueue. If any NIC has a hardware failure then a reset will be * queued onto this work queue. This is not a per-nic work queue, because * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised. */ static struct workqueue_struct *reset_workqueue; +/* How often and how many times to poll for a reset while waiting for a + * BIST that another function started to complete. + */ +#define BIST_WAIT_DELAY_MS 100 +#define BIST_WAIT_DELAY_COUNT 100 + /************************************************************************** * * Configurable values @@ -106,8 +113,8 @@ static struct workqueue_struct *reset_workqueue; * * This is only used in MSI-X interrupt mode */ -static unsigned int separate_tx_channels; -module_param(separate_tx_channels, uint, 0444); +static bool separate_tx_channels; +module_param(separate_tx_channels, bool, 0444); MODULE_PARM_DESC(separate_tx_channels, "Use separate channels for TX and RX"); @@ -117,9 +124,12 @@ MODULE_PARM_DESC(separate_tx_channels, static int napi_weight = 64; /* This is the time (in jiffies) between invocations of the hardware - * monitor. On Falcon-based NICs, this will: + * monitor. 
+ * On Falcon-based NICs, this will: * - Check the on-board hardware monitor; * - Poll the link state and reconfigure the hardware as necessary. + * On Siena-based NICs for power systems with EEH support, this will give EEH a + * chance to start. */ static unsigned int efx_monitor_interval = 1 * HZ; @@ -160,8 +170,8 @@ static unsigned int rss_cpus; module_param(rss_cpus, uint, 0444); MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling"); -static int phy_flash_cfg; -module_param(phy_flash_cfg, int, 0644); +static bool phy_flash_cfg; +module_param(phy_flash_cfg, bool, 0644); MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially"); static unsigned irq_adapt_low_thresh = 8000; @@ -187,8 +197,8 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value"); * *************************************************************************/ -static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq); -static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq); +static int efx_soft_enable_interrupts(struct efx_nic *efx); +static void efx_soft_disable_interrupts(struct efx_nic *efx); static void efx_remove_channel(struct efx_channel *channel); static void efx_remove_channels(struct efx_nic *efx); static const struct efx_channel_type efx_default_channel_type; @@ -203,13 +213,14 @@ static void efx_stop_all(struct efx_nic *efx); #define EFX_ASSERT_RESET_SERIALISED(efx) \ do { \ if ((efx->state == STATE_READY) || \ + (efx->state == STATE_RECOVERY) || \ (efx->state == STATE_DISABLED)) \ ASSERT_RTNL(); \ } while (0) static int efx_check_disabled(struct efx_nic *efx) { - if (efx->state == STATE_DISABLED) { + if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) { netif_err(efx, drv, efx->net_dev, "device is disabled due to earlier errors\n"); return -EIO; @@ -242,37 +253,13 @@ static int efx_process_channel(struct efx_channel *channel, int budget) struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); - /* Deliver last RX packet. */ - if (channel->rx_pkt) { - __efx_rx_packet(channel, channel->rx_pkt); - channel->rx_pkt = NULL; - } - if (rx_queue->enabled) { - efx_rx_strategy(channel); - efx_fast_push_rx_descriptors(rx_queue); - } + efx_rx_flush_packet(channel); + efx_fast_push_rx_descriptors(rx_queue, true); } return spent; } -/* Mark channel as finished processing - * - * Note that since we will not receive further interrupts for this - * channel before we finish processing and call the eventq_read_ack() - * method, there is no need to use the interrupt hold-off timers. - */ -static inline void efx_channel_processed(struct efx_channel *channel) -{ - /* The interrupt handler for this channel may set work_pending - * as soon as we acknowledge the events we've seen. Make sure - * it's cleared before then. */ - channel->work_pending = false; - smp_wmb(); - - efx_nic_eventq_read_ack(channel); -} - /* NAPI poll handler * * NAPI guarantees serialisation of polls of the same device, which @@ -317,58 +304,16 @@ static int efx_poll(struct napi_struct *napi, int budget) /* There is no race here; although napi_disable() will * only wait for napi_complete(), this isn't a problem - * since efx_channel_processed() will have no effect if + * since efx_nic_eventq_read_ack() will have no effect if * interrupts have already been disabled. 
*/ napi_complete(napi); - efx_channel_processed(channel); + efx_nic_eventq_read_ack(channel); } return spent; } -/* Process the eventq of the specified channel immediately on this CPU - * - * Disable hardware generated interrupts, wait for any existing - * processing to finish, then directly poll (and ack ) the eventq. - * Finally reenable NAPI and interrupts. - * - * This is for use only during a loopback self-test. It must not - * deliver any packets up the stack as this can result in deadlock. - */ -void efx_process_channel_now(struct efx_channel *channel) -{ - struct efx_nic *efx = channel->efx; - - BUG_ON(channel->channel >= efx->n_channels); - BUG_ON(!channel->enabled); - BUG_ON(!efx->loopback_selftest); - - /* Disable interrupts and wait for ISRs to complete */ - efx_nic_disable_interrupts(efx); - if (efx->legacy_irq) { - synchronize_irq(efx->legacy_irq); - efx->legacy_irq_enabled = false; - } - if (channel->irq) - synchronize_irq(channel->irq); - - /* Wait for any NAPI processing to complete */ - napi_disable(&channel->napi_str); - - /* Poll the channel */ - efx_process_channel(channel, channel->eventq_mask + 1); - - /* Ack the eventq. This may cause an interrupt to be generated - * when they are reenabled */ - efx_channel_processed(channel); - - napi_enable(&channel->napi_str); - if (efx->legacy_irq) - efx->legacy_irq_enabled = true; - efx_nic_enable_interrupts(efx); -} - /* Create event queue * Event queue memory allocations are done only once. If the channel * is reset, the memory buffer will be reused; this guards against @@ -392,14 +337,23 @@ static int efx_probe_eventq(struct efx_channel *channel) } /* Prepare channel's event queue */ -static void efx_init_eventq(struct efx_channel *channel) +static int efx_init_eventq(struct efx_channel *channel) { - netif_dbg(channel->efx, drv, channel->efx->net_dev, - "chan %d init event queue\n", channel->channel); + struct efx_nic *efx = channel->efx; + int rc; + + EFX_WARN_ON_PARANOID(channel->eventq_init); - channel->eventq_read_ptr = 0; + netif_dbg(efx, drv, efx->net_dev, + "chan %d init event queue\n", channel->channel); - efx_nic_init_eventq(channel); + rc = efx_nic_init_eventq(channel); + if (rc == 0) { + efx->type->push_irq_moderation(channel); + channel->eventq_read_ptr = 0; + channel->eventq_init = true; + } + return rc; } /* Enable event queue processing and NAPI */ @@ -408,11 +362,7 @@ static void efx_start_eventq(struct efx_channel *channel) netif_dbg(channel->efx, ifup, channel->efx->net_dev, "chan %d start event queue\n", channel->channel); - /* The interrupt handler for this channel may set work_pending - * as soon as we enable it. Make sure it's cleared before - * then. Similarly, make sure it sees the enabled flag set. 
- */ - channel->work_pending = false; + /* Make sure the NAPI handler sees the enabled flag set */ channel->enabled = true; smp_wmb(); @@ -432,10 +382,14 @@ static void efx_stop_eventq(struct efx_channel *channel) static void efx_fini_eventq(struct efx_channel *channel) { + if (!channel->eventq_init) + return; + netif_dbg(channel->efx, drv, channel->efx->net_dev, "chan %d fini event queue\n", channel->channel); efx_nic_fini_eventq(channel); + channel->eventq_init = false; } static void efx_remove_eventq(struct efx_channel *channel) @@ -550,8 +504,6 @@ static int efx_probe_channel(struct efx_channel *channel) goto fail; } - channel->n_rx_frm_trunc = 0; - return 0; fail: @@ -584,8 +536,8 @@ static void efx_set_channel_names(struct efx_nic *efx) efx_for_each_channel(channel, efx) channel->type->get_name(channel, - efx->channel_name[channel->channel], - sizeof(efx->channel_name[0])); + efx->msi_context[channel->channel].name, + sizeof(efx->msi_context[0].name)); } static int efx_probe_channels(struct efx_nic *efx) @@ -625,20 +577,53 @@ fail: */ static void efx_start_datapath(struct efx_nic *efx) { + bool old_rx_scatter = efx->rx_scatter; struct efx_tx_queue *tx_queue; struct efx_rx_queue *rx_queue; struct efx_channel *channel; + size_t rx_buf_len; /* Calculate the rx buffer allocation parameters required to * support the current MTU, including padding for header * alignment and overruns. */ - efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + - EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + - efx->type->rx_buffer_hash_size + - efx->type->rx_buffer_padding); - efx->rx_buffer_order = get_order(efx->rx_buffer_len + - sizeof(struct efx_rx_page_state)); + efx->rx_dma_len = (efx->rx_prefix_size + + EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + + efx->type->rx_buffer_padding); + rx_buf_len = (sizeof(struct efx_rx_page_state) + + efx->rx_ip_align + efx->rx_dma_len); + if (rx_buf_len <= PAGE_SIZE) { + efx->rx_scatter = efx->type->always_rx_scatter; + efx->rx_buffer_order = 0; + } else if (efx->type->can_rx_scatter) { + BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES); + BUILD_BUG_ON(sizeof(struct efx_rx_page_state) + + 2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE, + EFX_RX_BUF_ALIGNMENT) > + PAGE_SIZE); + efx->rx_scatter = true; + efx->rx_dma_len = EFX_RX_USR_BUF_SIZE; + efx->rx_buffer_order = 0; + } else { + efx->rx_scatter = false; + efx->rx_buffer_order = get_order(rx_buf_len); + } + + efx_rx_config_page_split(efx); + if (efx->rx_buffer_order) + netif_dbg(efx, drv, efx->net_dev, + "RX buf len=%u; page order=%u batch=%u\n", + efx->rx_dma_len, efx->rx_buffer_order, + efx->rx_pages_per_batch); + else + netif_dbg(efx, drv, efx->net_dev, + "RX buf len=%u step=%u bpp=%u; page batch=%u\n", + efx->rx_dma_len, efx->rx_page_buf_step, + efx->rx_bufs_per_page, efx->rx_pages_per_batch); + + /* RX filters may also have scatter-enabled flags */ + if (efx->rx_scatter != old_rx_scatter) + efx->type->filter_update_rx_scatter(efx); /* We must keep at least one descriptor in a TX ring empty. 
* We could avoid this when the queue size does not exactly @@ -652,21 +637,24 @@ static void efx_start_datapath(struct efx_nic *efx) /* Initialise the channels */ efx_for_each_channel(channel, efx) { - efx_for_each_channel_tx_queue(tx_queue, channel) + efx_for_each_channel_tx_queue(tx_queue, channel) { efx_init_tx_queue(tx_queue); - - /* The rx buffer allocation strategy is MTU dependent */ - efx_rx_strategy(channel); + atomic_inc(&efx->active_queues); + } efx_for_each_channel_rx_queue(rx_queue, channel) { efx_init_rx_queue(rx_queue); - efx_nic_generate_fill_event(rx_queue); + atomic_inc(&efx->active_queues); + efx_stop_eventq(channel); + efx_fast_push_rx_descriptors(rx_queue, false); + efx_start_eventq(channel); } - WARN_ON(channel->rx_pkt != NULL); - efx_rx_strategy(channel); + WARN_ON(channel->rx_pkt_n_frags); } + efx_ptp_start_datapath(efx); + if (netif_device_present(efx->net_dev)) netif_tx_wake_all_queues(efx->net_dev); } @@ -676,30 +664,17 @@ static void efx_stop_datapath(struct efx_nic *efx) struct efx_channel *channel; struct efx_tx_queue *tx_queue; struct efx_rx_queue *rx_queue; - struct pci_dev *dev = efx->pci_dev; int rc; EFX_ASSERT_RESET_SERIALISED(efx); BUG_ON(efx->port_enabled); - /* Only perform flush if dma is enabled */ - if (dev->is_busmaster) { - rc = efx_nic_flush_queues(efx); + efx_ptp_stop_datapath(efx); - if (rc && EFX_WORKAROUND_7803(efx)) { - /* Schedule a reset to recover from the flush failure. The - * descriptor caches reference memory we're about to free, - * but falcon_reconfigure_mac_wrapper() won't reconnect - * the MACs because of the pending reset. */ - netif_err(efx, drv, efx->net_dev, - "Resetting to recover from flush failure\n"); - efx_schedule_reset(efx, RESET_TYPE_ALL); - } else if (rc) { - netif_err(efx, drv, efx->net_dev, "failed to flush queues\n"); - } else { - netif_dbg(efx, drv, efx->net_dev, - "successfully flushed all queues\n"); - } + /* Stop RX refill */ + efx_for_each_channel(channel, efx) { + efx_for_each_channel_rx_queue(rx_queue, channel) + rx_queue->refill_enabled = false; } efx_for_each_channel(channel, efx) { @@ -713,7 +688,26 @@ static void efx_stop_datapath(struct efx_nic *efx) efx_stop_eventq(channel); efx_start_eventq(channel); } + } + + rc = efx->type->fini_dmaq(efx); + if (rc && EFX_WORKAROUND_7803(efx)) { + /* Schedule a reset to recover from the flush failure. The + * descriptor caches reference memory we're about to free, + * but falcon_reconfigure_mac_wrapper() won't reconnect + * the MACs because of the pending reset. 
+ */ + netif_err(efx, drv, efx->net_dev, + "Resetting to recover from flush failure\n"); + efx_schedule_reset(efx, RESET_TYPE_ALL); + } else if (rc) { + netif_err(efx, drv, efx->net_dev, "failed to flush queues\n"); + } else { + netif_dbg(efx, drv, efx->net_dev, + "successfully flushed all queues\n"); + } + efx_for_each_channel(channel, efx) { efx_for_each_channel_rx_queue(rx_queue, channel) efx_fini_rx_queue(rx_queue); efx_for_each_possible_channel_tx_queue(tx_queue, channel) @@ -751,7 +745,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel; u32 old_rxq_entries, old_txq_entries; unsigned i, next_buffer_table = 0; - int rc; + int rc, rc2; rc = efx_check_disabled(efx); if (rc) @@ -779,8 +773,9 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) tx_queue->txd.entries); } + efx_device_detach_sync(efx); efx_stop_all(efx); - efx_stop_interrupts(efx, true); + efx_soft_disable_interrupts(efx); /* Clone channels (where possible) */ memset(other_channel, 0, sizeof(other_channel)); @@ -830,8 +825,16 @@ out: } } - efx_start_interrupts(efx, true); - efx_start_all(efx); + rc2 = efx_soft_enable_interrupts(efx); + if (rc2) { + rc = rc ? rc : rc2; + netif_err(efx, drv, efx->net_dev, + "unable to restart interrupts on channel reallocation\n"); + efx_schedule_reset(efx, RESET_TYPE_DISABLE); + } else { + efx_start_all(efx); + netif_device_attach(efx->net_dev); + } return rc; rollback: @@ -901,10 +904,9 @@ void efx_link_status_changed(struct efx_nic *efx) /* Status message for kernel log */ if (link_state->up) netif_info(efx, link, efx->net_dev, - "link up at %uMbps %s-duplex (MTU %d)%s\n", + "link up at %uMbps %s-duplex (MTU %d)\n", link_state->speed, link_state->fd ? "full" : "half", - efx->net_dev->mtu, - (efx->promiscuous ? " [PROMISC]" : "")); + efx->net_dev->mtu); else netif_info(efx, link, efx->net_dev, "link down\n"); } @@ -953,10 +955,6 @@ int __efx_reconfigure_port(struct efx_nic *efx) WARN_ON(!mutex_is_locked(&efx->mac_lock)); - /* Serialise the promiscuous flag with efx_set_rx_mode. */ - netif_addr_lock_bh(efx->net_dev); - netif_addr_unlock_bh(efx->net_dev); - /* Disable PHY transmit in mac level loopbacks */ phy_mode = efx->phy_mode; if (LOOPBACK_INTERNAL(efx)) @@ -1015,7 +1013,7 @@ static int efx_probe_port(struct efx_nic *efx) return rc; /* Initialise MAC address to permanent address */ - memcpy(efx->net_dev->dev_addr, efx->net_dev->perm_addr, ETH_ALEN); + ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr); return 0; } @@ -1061,18 +1059,23 @@ static void efx_start_port(struct efx_nic *efx) mutex_lock(&efx->mac_lock); efx->port_enabled = true; - /* efx_mac_work() might have been scheduled after efx_stop_port(), - * and then cancelled by efx_flush_all() */ + /* Ensure MAC ingress/egress is enabled */ efx->type->reconfigure_mac(efx); mutex_unlock(&efx->mac_lock); } -/* Prevent efx_mac_work() and efx_monitor() from working */ +/* Cancel work for MAC reconfiguration, periodic hardware monitoring + * and the async self-test, wait for them to finish and prevent them + * being scheduled again. This doesn't cover online resets, which + * should only be cancelled when removing the device. 
+ */ static void efx_stop_port(struct efx_nic *efx) { netif_dbg(efx, ifdown, efx->net_dev, "stop port\n"); + EFX_ASSERT_RESET_SERIALISED(efx); + mutex_lock(&efx->mac_lock); efx->port_enabled = false; mutex_unlock(&efx->mac_lock); @@ -1080,6 +1083,10 @@ static void efx_stop_port(struct efx_nic *efx) /* Serialise against efx_set_multicast_list() */ netif_addr_lock_bh(efx->net_dev); netif_addr_unlock_bh(efx->net_dev); + + cancel_delayed_work_sync(&efx->monitor_work); + efx_selftest_async_cancel(efx); + cancel_work_sync(&efx->mac_work); } static void efx_fini_port(struct efx_nic *efx) @@ -1109,11 +1116,83 @@ static void efx_remove_port(struct efx_nic *efx) * **************************************************************************/ +static LIST_HEAD(efx_primary_list); +static LIST_HEAD(efx_unassociated_list); + +static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right) +{ + return left->type == right->type && + left->vpd_sn && right->vpd_sn && + !strcmp(left->vpd_sn, right->vpd_sn); +} + +static void efx_associate(struct efx_nic *efx) +{ + struct efx_nic *other, *next; + + if (efx->primary == efx) { + /* Adding primary function; look for secondaries */ + + netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n"); + list_add_tail(&efx->node, &efx_primary_list); + + list_for_each_entry_safe(other, next, &efx_unassociated_list, + node) { + if (efx_same_controller(efx, other)) { + list_del(&other->node); + netif_dbg(other, probe, other->net_dev, + "moving to secondary list of %s %s\n", + pci_name(efx->pci_dev), + efx->net_dev->name); + list_add_tail(&other->node, + &efx->secondary_list); + other->primary = efx; + } + } + } else { + /* Adding secondary function; look for primary */ + + list_for_each_entry(other, &efx_primary_list, node) { + if (efx_same_controller(efx, other)) { + netif_dbg(efx, probe, efx->net_dev, + "adding to secondary list of %s %s\n", + pci_name(other->pci_dev), + other->net_dev->name); + list_add_tail(&efx->node, + &other->secondary_list); + efx->primary = other; + return; + } + } + + netif_dbg(efx, probe, efx->net_dev, + "adding to unassociated list\n"); + list_add_tail(&efx->node, &efx_unassociated_list); + } +} + +static void efx_dissociate(struct efx_nic *efx) +{ + struct efx_nic *other, *next; + + list_del(&efx->node); + efx->primary = NULL; + + list_for_each_entry_safe(other, next, &efx->secondary_list, node) { + list_del(&other->node); + netif_dbg(other, probe, other->net_dev, + "moving to unassociated list\n"); + list_add_tail(&other->node, &efx_unassociated_list); + other->primary = NULL; + } +} + /* This configures the PCI device to enable I/O and DMA. */ static int efx_init_io(struct efx_nic *efx) { struct pci_dev *pci_dev = efx->pci_dev; dma_addr_t dma_mask = efx->type->max_dma_mask; + unsigned int mem_map_size = efx->type->mem_map_size(efx); int rc; netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); @@ -1134,7 +1213,7 @@ static int efx_init_io(struct efx_nic *efx) */ while (dma_mask > 0x7fffffffUL) { if (dma_supported(&pci_dev->dev, dma_mask)) { - rc = dma_set_mask(&pci_dev->dev, dma_mask); + rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask); if (rc == 0) break; } @@ -1147,16 +1226,6 @@ static int efx_init_io(struct efx_nic *efx) } netif_dbg(efx, probe, efx->net_dev, "using DMA mask %llx\n", (unsigned long long) dma_mask); - rc = dma_set_coherent_mask(&pci_dev->dev, dma_mask); - if (rc) { - /* dma_set_coherent_mask() is not *allowed* to - * fail with a mask that dma_set_mask() accepted, - * but just in case... 
- */ - netif_err(efx, probe, efx->net_dev, - "failed to set consistent DMA mask\n"); - goto fail2; - } efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR); rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc"); @@ -1166,20 +1235,18 @@ static int efx_init_io(struct efx_nic *efx) rc = -EIO; goto fail3; } - efx->membase = ioremap_nocache(efx->membase_phys, - efx->type->mem_map_size); + efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size); if (!efx->membase) { netif_err(efx, probe, efx->net_dev, "could not map memory BAR at %llx+%x\n", - (unsigned long long)efx->membase_phys, - efx->type->mem_map_size); + (unsigned long long)efx->membase_phys, mem_map_size); rc = -ENOMEM; goto fail4; } netif_dbg(efx, probe, efx->net_dev, "memory BAR at %llx+%x (virtual %p)\n", - (unsigned long long)efx->membase_phys, - efx->type->mem_map_size, efx->membase); + (unsigned long long)efx->membase_phys, mem_map_size, + efx->membase); return 0; @@ -1253,36 +1320,11 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx) return count; } -static int -efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries) -{ -#ifdef CONFIG_RFS_ACCEL - unsigned int i; - int rc; - - efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels); - if (!efx->net_dev->rx_cpu_rmap) - return -ENOMEM; - for (i = 0; i < efx->n_rx_channels; i++) { - rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap, - xentries[i].vector); - if (rc) { - free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); - efx->net_dev->rx_cpu_rmap = NULL; - return rc; - } - } -#endif - return 0; -} - /* Probe the number and type of interrupts we are able to obtain, and * the resulting numbers of channels and RX queues. */ static int efx_probe_interrupts(struct efx_nic *efx) { - unsigned int max_channels = - min(efx->type->phys_addr_channels, EFX_MAX_CHANNELS); unsigned int extra_channels = 0; unsigned int i, j; int rc; @@ -1299,24 +1341,27 @@ static int efx_probe_interrupts(struct efx_nic *efx) if (separate_tx_channels) n_channels *= 2; n_channels += extra_channels; - n_channels = min(n_channels, max_channels); + n_channels = min(n_channels, efx->max_channels); for (i = 0; i < n_channels; i++) xentries[i].entry = i; - rc = pci_enable_msix(efx->pci_dev, xentries, n_channels); - if (rc > 0) { + rc = pci_enable_msix_range(efx->pci_dev, + xentries, 1, n_channels); + if (rc < 0) { + /* Fall back to single channel MSI */ + efx->interrupt_mode = EFX_INT_MODE_MSI; + netif_err(efx, drv, efx->net_dev, + "could not enable MSI-X\n"); + } else if (rc < n_channels) { netif_err(efx, drv, efx->net_dev, "WARNING: Insufficient MSI-X vectors" " available (%d < %u).\n", rc, n_channels); netif_err(efx, drv, efx->net_dev, "WARNING: Performance may be reduced.\n"); - EFX_BUG_ON_PARANOID(rc >= n_channels); n_channels = rc; - rc = pci_enable_msix(efx->pci_dev, xentries, - n_channels); } - if (rc == 0) { + if (rc > 0) { efx->n_channels = n_channels; if (n_channels > extra_channels) n_channels -= extra_channels; @@ -1329,19 +1374,9 @@ static int efx_probe_interrupts(struct efx_nic *efx) efx->n_tx_channels = n_channels; efx->n_rx_channels = n_channels; } - rc = efx_init_rx_cpu_rmap(efx, xentries); - if (rc) { - pci_disable_msix(efx->pci_dev); - return rc; - } for (i = 0; i < efx->n_channels; i++) efx_get_channel(efx, i)->irq = xentries[i].vector; - } else { - /* Fall back to single channel MSI */ - efx->interrupt_mode = EFX_INT_MODE_MSI; - netif_err(efx, drv, efx->net_dev, - "could not enable MSI-X\n"); } } @@ -1390,27 +1425,42 @@ static int 
efx_probe_interrupts(struct efx_nic *efx) return 0; } -/* Enable interrupts, then probe and start the event queues */ -static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq) +static int efx_soft_enable_interrupts(struct efx_nic *efx) { - struct efx_channel *channel; + struct efx_channel *channel, *end_channel; + int rc; BUG_ON(efx->state == STATE_DISABLED); - if (efx->legacy_irq) - efx->legacy_irq_enabled = true; - efx_nic_enable_interrupts(efx); + efx->irq_soft_enabled = true; + smp_wmb(); efx_for_each_channel(channel, efx) { - if (!channel->type->keep_eventq || !may_keep_eventq) - efx_init_eventq(channel); + if (!channel->type->keep_eventq) { + rc = efx_init_eventq(channel); + if (rc) + goto fail; + } efx_start_eventq(channel); } efx_mcdi_mode_event(efx); + + return 0; +fail: + end_channel = channel; + efx_for_each_channel(channel, efx) { + if (channel == end_channel) + break; + efx_stop_eventq(channel); + if (!channel->type->keep_eventq) + efx_fini_eventq(channel); + } + + return rc; } -static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq) +static void efx_soft_disable_interrupts(struct efx_nic *efx) { struct efx_channel *channel; @@ -1419,20 +1469,79 @@ static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq) efx_mcdi_mode_poll(efx); - efx_nic_disable_interrupts(efx); - if (efx->legacy_irq) { + efx->irq_soft_enabled = false; + smp_wmb(); + + if (efx->legacy_irq) synchronize_irq(efx->legacy_irq); - efx->legacy_irq_enabled = false; - } efx_for_each_channel(channel, efx) { if (channel->irq) synchronize_irq(channel->irq); efx_stop_eventq(channel); - if (!channel->type->keep_eventq || !may_keep_eventq) + if (!channel->type->keep_eventq) + efx_fini_eventq(channel); + } + + /* Flush the asynchronous MCDI request queue */ + efx_mcdi_flush_async(efx); +} + +static int efx_enable_interrupts(struct efx_nic *efx) +{ + struct efx_channel *channel, *end_channel; + int rc; + + BUG_ON(efx->state == STATE_DISABLED); + + if (efx->eeh_disabled_legacy_irq) { + enable_irq(efx->legacy_irq); + efx->eeh_disabled_legacy_irq = false; + } + + efx->type->irq_enable_master(efx); + + efx_for_each_channel(channel, efx) { + if (channel->type->keep_eventq) { + rc = efx_init_eventq(channel); + if (rc) + goto fail; + } + } + + rc = efx_soft_enable_interrupts(efx); + if (rc) + goto fail; + + return 0; + +fail: + end_channel = channel; + efx_for_each_channel(channel, efx) { + if (channel == end_channel) + break; + if (channel->type->keep_eventq) efx_fini_eventq(channel); } + + efx->type->irq_disable_non_ev(efx); + + return rc; +} + +static void efx_disable_interrupts(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_soft_disable_interrupts(efx); + + efx_for_each_channel(channel, efx) { + if (channel->type->keep_eventq) + efx_fini_eventq(channel); + } + + efx->type->irq_disable_non_ev(efx); } static void efx_remove_interrupts(struct efx_nic *efx) @@ -1489,9 +1598,13 @@ static int efx_probe_nic(struct efx_nic *efx) * in MSI-X interrupts. 
*/ rc = efx_probe_interrupts(efx); if (rc) - goto fail; + goto fail1; + + efx_set_channels(efx); - efx->type->dimension_resources(efx); + rc = efx->type->dimension_resources(efx); + if (rc) + goto fail2; if (efx->n_channels > 1) get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key)); @@ -1499,7 +1612,6 @@ static int efx_probe_nic(struct efx_nic *efx) efx->rx_indir_table[i] = ethtool_rxfh_indir_default(i, efx->rss_spread); - efx_set_channels(efx); netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels); @@ -1509,7 +1621,9 @@ static int efx_probe_nic(struct efx_nic *efx) return 0; -fail: +fail2: + efx_remove_interrupts(efx); +fail1: efx->type->remove(efx); return rc; } @@ -1522,6 +1636,44 @@ static void efx_remove_nic(struct efx_nic *efx) efx->type->remove(efx); } +static int efx_probe_filters(struct efx_nic *efx) +{ + int rc; + + spin_lock_init(&efx->filter_lock); + + rc = efx->type->filter_table_probe(efx); + if (rc) + return rc; + +#ifdef CONFIG_RFS_ACCEL + if (efx->type->offload_features & NETIF_F_NTUPLE) { + efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters, + sizeof(*efx->rps_flow_id), + GFP_KERNEL); + if (!efx->rps_flow_id) { + efx->type->filter_table_remove(efx); + return -ENOMEM; + } + } +#endif + + return 0; +} + +static void efx_remove_filters(struct efx_nic *efx) +{ +#ifdef CONFIG_RFS_ACCEL + kfree(efx->rps_flow_id); +#endif + efx->type->filter_table_remove(efx); +} + +static void efx_restore_filters(struct efx_nic *efx) +{ + efx->type->filter_table_restore(efx); +} + /************************************************************************** * * NIC startup/shutdown @@ -1588,19 +1740,22 @@ static void efx_start_all(struct efx_nic *efx) /* Check that it is appropriate to restart the interface. All * of these flags are safe to read under just the rtnl lock */ - if (efx->port_enabled || !netif_running(efx->net_dev)) + if (efx->port_enabled || !netif_running(efx->net_dev) || + efx->reset_pending) return; efx_start_port(efx); efx_start_datapath(efx); - /* Start the hardware monitor if there is one. Otherwise (we're link - * event driven), we have to poll the PHY because after an event queue - * flush, we could have a missed a link state change */ - if (efx->type->monitor != NULL) { + /* Start the hardware monitor if there is one */ + if (efx->type->monitor != NULL) queue_delayed_work(efx->workqueue, &efx->monitor_work, efx_monitor_interval); - } else { + + /* If link state detection is normally event-driven, we have + * to poll now because we could have missed a change + */ + if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { mutex_lock(&efx->mac_lock); if (efx->phy_op->poll(efx)) efx_link_status_changed(efx); @@ -1608,18 +1763,10 @@ static void efx_start_all(struct efx_nic *efx) } efx->type->start_stats(efx); -} - -/* Flush all delayed work. Should only be called when no more delayed work - * will be scheduled. This doesn't flush pending online resets (efx_reset), - * since we're holding the rtnl_lock at this point. 
*/ -static void efx_flush_all(struct efx_nic *efx) -{ - /* Make sure the hardware monitor and event self-test are stopped */ - cancel_delayed_work_sync(&efx->monitor_work); - efx_selftest_async_cancel(efx); - /* Stop scheduled port reconfigurations */ - cancel_work_sync(&efx->mac_work); + efx->type->pull_stats(efx); + spin_lock_bh(&efx->stats_lock); + efx->type->update_stats(efx, NULL, NULL); + spin_unlock_bh(&efx->stats_lock); } /* Quiesce the hardware and software data path, and regular activity @@ -1635,14 +1782,22 @@ static void efx_stop_all(struct efx_nic *efx) if (!efx->port_enabled) return; + /* update stats before we go down so we can accurately count + * rx_nodesc_drops + */ + efx->type->pull_stats(efx); + spin_lock_bh(&efx->stats_lock); + efx->type->update_stats(efx, NULL, NULL); + spin_unlock_bh(&efx->stats_lock); efx->type->stop_stats(efx); efx_stop_port(efx); - /* Flush efx_mac_work(), refill_workqueue, monitor_work */ - efx_flush_all(efx); - - /* Stop the kernel transmit interface late, so the watchdog - * timer isn't ticking over the flush */ + /* Stop the kernel transmit interface. This is only valid if + * the device is stopped or detached; otherwise the watchdog + * may fire immediately. + */ + WARN_ON(netif_running(efx->net_dev) && + netif_device_present(efx->net_dev)); netif_tx_disable(efx->net_dev); efx_stop_datapath(efx); @@ -1780,7 +1935,9 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) struct mii_ioctl_data *data = if_mii(ifr); if (cmd == SIOCSHWTSTAMP) - return efx_ptp_ioctl(efx, ifr, cmd); + return efx_ptp_set_ts_config(efx, ifr); + if (cmd == SIOCGHWTSTAMP) + return efx_ptp_get_ts_config(efx, ifr); /* Convert phy_id from older PRTAD/DEVAD format */ if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) && @@ -1905,34 +2062,9 @@ static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats) { struct efx_nic *efx = netdev_priv(net_dev); - struct efx_mac_stats *mac_stats = &efx->mac_stats; spin_lock_bh(&efx->stats_lock); - - efx->type->update_stats(efx); - - stats->rx_packets = mac_stats->rx_packets; - stats->tx_packets = mac_stats->tx_packets; - stats->rx_bytes = mac_stats->rx_bytes; - stats->tx_bytes = mac_stats->tx_bytes; - stats->rx_dropped = efx->n_rx_nodesc_drop_cnt; - stats->multicast = mac_stats->rx_multicast; - stats->collisions = mac_stats->tx_collision; - stats->rx_length_errors = (mac_stats->rx_gtjumbo + - mac_stats->rx_length_error); - stats->rx_crc_errors = mac_stats->rx_bad; - stats->rx_frame_errors = mac_stats->rx_align_error; - stats->rx_fifo_errors = mac_stats->rx_overflow; - stats->rx_missed_errors = mac_stats->rx_missed; - stats->tx_window_errors = mac_stats->tx_late_collision; - - stats->rx_errors = (stats->rx_length_errors + - stats->rx_crc_errors + - stats->rx_frame_errors + - mac_stats->rx_symbol_error); - stats->tx_errors = (stats->tx_window_errors + - mac_stats->tx_bad); - + efx->type->update_stats(efx, NULL, stats); spin_unlock_bh(&efx->stats_lock); return stats; @@ -1963,16 +2095,18 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu) if (new_mtu > EFX_MAX_MTU) return -EINVAL; - efx_stop_all(efx); - netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); + efx_device_detach_sync(efx); + efx_stop_all(efx); + mutex_lock(&efx->mac_lock); net_dev->mtu = new_mtu; efx->type->reconfigure_mac(efx); mutex_unlock(&efx->mac_lock); efx_start_all(efx); + netif_device_attach(efx->net_dev); return 0; } @@ -1980,7 +2114,7 @@ static int 
efx_set_mac_address(struct net_device *net_dev, void *data) { struct efx_nic *efx = netdev_priv(net_dev); struct sockaddr *addr = data; - char *new_addr = addr->sa_data; + u8 *new_addr = addr->sa_data; if (!is_valid_ether_addr(new_addr)) { netif_err(efx, drv, efx->net_dev, @@ -1989,7 +2123,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data) return -EADDRNOTAVAIL; } - memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len); + ether_addr_copy(net_dev->dev_addr, new_addr); efx_sriov_mac_address_changed(efx); /* Reconfigure the MAC */ @@ -2004,30 +2138,6 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data) static void efx_set_rx_mode(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); - struct netdev_hw_addr *ha; - union efx_multicast_hash *mc_hash = &efx->multicast_hash; - u32 crc; - int bit; - - efx->promiscuous = !!(net_dev->flags & IFF_PROMISC); - - /* Build multicast hash table */ - if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) { - memset(mc_hash, 0xff, sizeof(*mc_hash)); - } else { - memset(mc_hash, 0x00, sizeof(*mc_hash)); - netdev_for_each_mc_addr(ha, net_dev) { - crc = ether_crc_le(ETH_ALEN, ha->addr); - bit = crc & (EFX_MCAST_HASH_ENTRIES - 1); - __set_bit_le(bit, mc_hash); - } - - /* Broadcast packets go through the multicast hash filter. - * ether_crc_le() of the broadcast address is 0xbe2612ff - * so we always add bit 0xff to the mask. - */ - __set_bit_le(0xff, mc_hash); - } if (efx->port_enabled) queue_work(efx->workqueue, &efx->mac_work); @@ -2040,12 +2150,12 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data) /* If disabling RX n-tuple filtering, clear existing filters */ if (net_dev->features & ~data & NETIF_F_NTUPLE) - efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL); + return efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL); return 0; } -static const struct net_device_ops efx_netdev_ops = { +static const struct net_device_ops efx_farch_netdev_ops = { .ndo_open = efx_net_open, .ndo_stop = efx_net_stop, .ndo_get_stats64 = efx_net_stats, @@ -2072,6 +2182,26 @@ static const struct net_device_ops efx_netdev_ops = { #endif }; +static const struct net_device_ops efx_ef10_netdev_ops = { + .ndo_open = efx_net_open, + .ndo_stop = efx_net_stop, + .ndo_get_stats64 = efx_net_stats, + .ndo_tx_timeout = efx_watchdog, + .ndo_start_xmit = efx_hard_start_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = efx_ioctl, + .ndo_change_mtu = efx_change_mtu, + .ndo_set_mac_address = efx_set_mac_address, + .ndo_set_rx_mode = efx_set_rx_mode, + .ndo_set_features = efx_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = efx_netpoll, +#endif +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = efx_filter_rfs, +#endif +}; + static void efx_update_name(struct efx_nic *efx) { strcpy(efx->name, efx->net_dev->name); @@ -2082,9 +2212,10 @@ static void efx_update_name(struct efx_nic *efx) static int efx_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *net_dev = ptr; + struct net_device *net_dev = netdev_notifier_info_to_dev(ptr); - if (net_dev->netdev_ops == &efx_netdev_ops && + if ((net_dev->netdev_ops == &efx_farch_netdev_ops || + net_dev->netdev_ops == &efx_ef10_netdev_ops) && event == NETDEV_CHANGENAME) efx_update_name(netdev_priv(net_dev)); @@ -2101,7 +2232,7 @@ show_phy_type(struct device *dev, struct device_attribute *attr, char *buf) struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); return sprintf(buf, 
"%d\n", efx->phy_type); } -static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL); +static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL); static int efx_register_netdev(struct efx_nic *efx) { @@ -2111,8 +2242,13 @@ static int efx_register_netdev(struct efx_nic *efx) net_dev->watchdog_timeo = 5 * HZ; net_dev->irq = efx->pci_dev->irq; - net_dev->netdev_ops = &efx_netdev_ops; - SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); + if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) { + net_dev->netdev_ops = &efx_ef10_netdev_ops; + net_dev->priv_flags |= IFF_UNICAST_FLT; + } else { + net_dev->netdev_ops = &efx_farch_netdev_ops; + } + net_dev->ethtool_ops = &efx_ethtool_ops; net_dev->gso_max_segs = EFX_TSO_MAX_SEGS; rtnl_lock(); @@ -2148,6 +2284,8 @@ static int efx_register_netdev(struct efx_nic *efx) efx_init_tx_queue_core_txq(tx_queue); } + efx_associate(efx); + rtnl_unlock(); rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); @@ -2161,6 +2299,7 @@ static int efx_register_netdev(struct efx_nic *efx) fail_registered: rtnl_lock(); + efx_dissociate(efx); unregister_netdevice(net_dev); fail_locked: efx->state = STATE_UNINIT; @@ -2171,22 +2310,11 @@ fail_locked: static void efx_unregister_netdev(struct efx_nic *efx) { - struct efx_channel *channel; - struct efx_tx_queue *tx_queue; - if (!efx->net_dev) return; BUG_ON(netdev_priv(efx->net_dev) != efx); - /* Free up any skbs still remaining. This has to happen before - * we try to unregister the netdev as running their destructors - * may be needed to get the device ref. count to 0. */ - efx_for_each_channel(channel, efx) { - efx_for_each_channel_tx_queue(tx_queue, channel) - efx_release_tx_buffers(tx_queue); - } - strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); @@ -2208,8 +2336,11 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method) { EFX_ASSERT_RESET_SERIALISED(efx); + if (method == RESET_TYPE_MCDI_TIMEOUT) + efx->type->prepare_flr(efx); + efx_stop_all(efx); - efx_stop_interrupts(efx, false); + efx_disable_interrupts(efx); mutex_lock(&efx->mac_lock); if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) @@ -2228,6 +2359,10 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) EFX_ASSERT_RESET_SERIALISED(efx); + if (method == RESET_TYPE_MCDI_TIMEOUT) + efx->type->finish_flr(efx); + + /* Ensure that SRAM is initialised even if we're disabling the device */ rc = efx->type->init(efx); if (rc) { netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n"); @@ -2246,9 +2381,9 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) "could not restore PHY settings\n"); } - efx->type->reconfigure_mac(efx); - - efx_start_interrupts(efx, false); + rc = efx_enable_interrupts(efx); + if (rc) + goto fail; efx_restore_filters(efx); efx_sriov_reset(efx); @@ -2279,7 +2414,7 @@ int efx_reset(struct efx_nic *efx, enum reset_type method) netif_info(efx, drv, efx->net_dev, "resetting (%s)\n", RESET_TYPE(method)); - netif_device_detach(efx->net_dev); + efx_device_detach_sync(efx); efx_reset_down(efx, method); rc = efx->type->reset(efx, method); @@ -2291,7 +2426,10 @@ int efx_reset(struct efx_nic *efx, enum reset_type method) /* Clear flags for the scopes we covered. We assume the NIC and * driver are now quiescent so that there is no race here. 
*/ - efx->reset_pending &= -(1 << (method + 1)); + if (method < RESET_TYPE_MAX_METHOD) + efx->reset_pending &= -(1 << (method + 1)); + else /* it doesn't fit into the well-ordered scope hierarchy */ + __clear_bit(method, &efx->reset_pending); /* Reinitialise bus-mastering, which may have been turned off before * the reset was scheduled. This is still appropriate, even in the @@ -2301,7 +2439,9 @@ int efx_reset(struct efx_nic *efx, enum reset_type method) out: /* Leave device stopped if necessary */ - disabled = rc || method == RESET_TYPE_DISABLE; + disabled = rc || + method == RESET_TYPE_DISABLE || + method == RESET_TYPE_RECOVER_OR_DISABLE; rc2 = efx_reset_up(efx, method, !disabled); if (rc2) { disabled = true; @@ -2320,13 +2460,69 @@ out: return rc; } +/* Try recovery mechanisms. + * For now only EEH is supported. + * Returns 0 if the recovery mechanisms are unsuccessful. + * Returns a non-zero value otherwise. + */ +int efx_try_recovery(struct efx_nic *efx) +{ +#ifdef CONFIG_EEH + /* A PCI error can occur and not be seen by EEH because nothing + * happens on the PCI bus. In this case the driver may fail and + * schedule a 'recover or reset', leading to this recovery handler. + * Manually call the eeh failure check function. + */ + struct eeh_dev *eehdev = + of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev)); + + if (eeh_dev_check_failure(eehdev)) { + /* The EEH mechanisms will handle the error and reset the + * device if necessary. + */ + return 1; + } +#endif + return 0; +} + +static void efx_wait_for_bist_end(struct efx_nic *efx) +{ + int i; + + for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) { + if (efx_mcdi_poll_reboot(efx)) + goto out; + msleep(BIST_WAIT_DELAY_MS); + } + + netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n"); +out: + /* Either way unset the BIST flag. If we found no reboot we probably + * won't recover, but we should try. + */ + efx->mc_bist_for_other_fn = false; +} + /* The worker thread exists so that code that cannot sleep can * schedule a reset for later. */ static void efx_reset_work(struct work_struct *data) { struct efx_nic *efx = container_of(data, struct efx_nic, reset_work); - unsigned long pending = ACCESS_ONCE(efx->reset_pending); + unsigned long pending; + enum reset_type method; + + pending = ACCESS_ONCE(efx->reset_pending); + method = fls(pending) - 1; + + if (method == RESET_TYPE_MC_BIST) + efx_wait_for_bist_end(efx); + + if ((method == RESET_TYPE_RECOVER_OR_DISABLE || + method == RESET_TYPE_RECOVER_OR_ALL) && + efx_try_recovery(efx)) + return; if (!pending) return; @@ -2338,7 +2534,7 @@ static void efx_reset_work(struct work_struct *data) * it cannot change again. 
*/ if (efx->state == STATE_READY) - (void)efx_reset(efx, fls(pending) - 1); + (void)efx_reset(efx, method); rtnl_unlock(); } @@ -2347,11 +2543,22 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) { enum reset_type method; + if (efx->state == STATE_RECOVERY) { + netif_dbg(efx, drv, efx->net_dev, + "recovering: skip scheduling %s reset\n", + RESET_TYPE(type)); + return; + } + switch (type) { case RESET_TYPE_INVISIBLE: case RESET_TYPE_ALL: + case RESET_TYPE_RECOVER_OR_ALL: case RESET_TYPE_WORLD: case RESET_TYPE_DISABLE: + case RESET_TYPE_RECOVER_OR_DISABLE: + case RESET_TYPE_MC_BIST: + case RESET_TYPE_MCDI_TIMEOUT: method = type; netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n", RESET_TYPE(method)); @@ -2398,6 +2605,8 @@ static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = { .driver_data = (unsigned long) &siena_a0_nic_type}, {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */ .driver_data = (unsigned long) &siena_a0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */ + .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, {0} /* end of list */ }; @@ -2443,6 +2652,8 @@ static int efx_init_struct(struct efx_nic *efx, int i; /* Initialise common structures */ + INIT_LIST_HEAD(&efx->node); + INIT_LIST_HEAD(&efx->secondary_list); spin_lock_init(&efx->biu_lock); #ifdef CONFIG_SFC_MTD INIT_LIST_HEAD(&efx->mtd_list); @@ -2456,6 +2667,13 @@ static int efx_init_struct(struct efx_nic *efx, strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); efx->net_dev = net_dev; + efx->rx_prefix_size = efx->type->rx_prefix_size; + efx->rx_ip_align = + NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0; + efx->rx_packet_hash_offset = + efx->type->rx_hash_offset - efx->type->rx_prefix_size; + efx->rx_packet_ts_offset = + efx->type->rx_ts_offset - efx->type->rx_prefix_size; spin_lock_init(&efx->stats_lock); mutex_init(&efx->mac_lock); efx->phy_op = &efx_dummy_phy_operations; @@ -2467,10 +2685,10 @@ static int efx_init_struct(struct efx_nic *efx, efx->channel[i] = efx_alloc_channel(efx, i, NULL); if (!efx->channel[i]) goto fail; + efx->msi_context[i].efx = efx; + efx->msi_context[i].index = i; } - EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS); - /* Higher numbered interrupt modes are less capable! 
*/ efx->interrupt_mode = max(efx->type->max_interrupt_mode, interrupt_mode); @@ -2496,6 +2714,8 @@ static void efx_fini_struct(struct efx_nic *efx) for (i = 0; i < EFX_MAX_CHANNELS; i++) kfree(efx->channel[i]); + kfree(efx->vpd_sn); + if (efx->workqueue) { destroy_workqueue(efx->workqueue); efx->workqueue = NULL; @@ -2519,11 +2739,7 @@ static void efx_pci_remove_main(struct efx_nic *efx) BUG_ON(efx->state == STATE_READY); cancel_work_sync(&efx->reset_work); -#ifdef CONFIG_RFS_ACCEL - free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); - efx->net_dev->rx_cpu_rmap = NULL; -#endif - efx_stop_interrupts(efx, false); + efx_disable_interrupts(efx); efx_nic_fini_interrupt(efx); efx_fini_port(efx); efx->type->fini(efx); @@ -2544,8 +2760,9 @@ static void efx_pci_remove(struct pci_dev *pci_dev) /* Mark the NIC as fini, then stop the interface */ rtnl_lock(); + efx_dissociate(efx); dev_close(efx->net_dev); - efx_stop_interrupts(efx, false); + efx_disable_interrupts(efx); rtnl_unlock(); efx_sriov_fini(efx); @@ -2559,8 +2776,9 @@ static void efx_pci_remove(struct pci_dev *pci_dev) netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); efx_fini_struct(efx); - pci_set_drvdata(pci_dev, NULL); free_netdev(efx->net_dev); + + pci_disable_pcie_error_reporting(pci_dev); }; /* NIC VPD information @@ -2569,12 +2787,12 @@ static void efx_pci_remove(struct pci_dev *pci_dev) * always appear within the first 512 bytes. */ #define SFC_VPD_LEN 512 -static void efx_print_product_vpd(struct efx_nic *efx) +static void efx_probe_vpd_strings(struct efx_nic *efx) { struct pci_dev *dev = efx->pci_dev; char vpd_data[SFC_VPD_LEN]; ssize_t vpd_size; - int i, j; + int ro_start, ro_size, i, j; /* Get the vpd data from the device */ vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data); @@ -2584,14 +2802,15 @@ static void efx_print_product_vpd(struct efx_nic *efx) } /* Get the Read only section */ - i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA); - if (i < 0) { + ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA); + if (ro_start < 0) { netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n"); return; } - j = pci_vpd_lrdt_size(&vpd_data[i]); - i += PCI_VPD_LRDT_TAG_SIZE; + ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]); + j = ro_size; + i = ro_start + PCI_VPD_LRDT_TAG_SIZE; if (i + j > vpd_size) j = vpd_size - i; @@ -2611,6 +2830,27 @@ static void efx_print_product_vpd(struct efx_nic *efx) netif_info(efx, drv, efx->net_dev, "Part Number : %.*s\n", j, &vpd_data[i]); + + i = ro_start + PCI_VPD_LRDT_TAG_SIZE; + j = ro_size; + i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN"); + if (i < 0) { + netif_err(efx, drv, efx->net_dev, "Serial number not found\n"); + return; + } + + j = pci_vpd_info_field_size(&vpd_data[i]); + i += PCI_VPD_INFO_FLD_HDR_SIZE; + if (i + j > vpd_size) { + netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n"); + return; + } + + efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL); + if (!efx->vpd_sn) + return; + + snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]); } @@ -2645,10 +2885,14 @@ static int efx_pci_probe_main(struct efx_nic *efx) rc = efx_nic_init_interrupt(efx); if (rc) goto fail5; - efx_start_interrupts(efx, false); + rc = efx_enable_interrupts(efx); + if (rc) + goto fail6; return 0; + fail6: + efx_nic_fini_interrupt(efx); fail5: efx_fini_port(efx); fail4: @@ -2669,8 +2913,8 @@ static int efx_pci_probe_main(struct efx_nic *efx) * transmission; this is left to the first time one of the network * interfaces is brought up (i.e. efx_net_open). 
*/ -static int __devinit efx_pci_probe(struct pci_dev *pci_dev, - const struct pci_device_id *entry) +static int efx_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *entry) { struct net_device *net_dev; struct efx_nic *efx; @@ -2703,7 +2947,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, netif_info(efx, probe, efx->net_dev, "Solarflare NIC detected\n"); - efx_print_product_vpd(efx); + efx_probe_vpd_strings(efx); /* Set up basic I/O (BAR mappings etc) */ rc = efx_init_io(efx); @@ -2733,6 +2977,11 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, netif_warn(efx, probe, efx->net_dev, "failed to create MTDs (%d)\n", rc); + rc = pci_enable_pcie_error_reporting(pci_dev); + if (rc && rc != -EINVAL) + netif_warn(efx, probe, efx->net_dev, + "pci_enable_pcie_error_reporting failed (%d)\n", rc); + return 0; fail4: @@ -2742,7 +2991,6 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, fail2: efx_fini_struct(efx); fail1: - pci_set_drvdata(pci_dev, NULL); WARN_ON(rc > 0); netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc); free_netdev(net_dev); @@ -2758,10 +3006,10 @@ static int efx_pm_freeze(struct device *dev) if (efx->state != STATE_DISABLED) { efx->state = STATE_UNINIT; - netif_device_detach(efx->net_dev); + efx_device_detach_sync(efx); efx_stop_all(efx); - efx_stop_interrupts(efx, false); + efx_disable_interrupts(efx); } rtnl_unlock(); @@ -2771,12 +3019,15 @@ static int efx_pm_freeze(struct device *dev) static int efx_pm_thaw(struct device *dev) { + int rc; struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); rtnl_lock(); if (efx->state != STATE_DISABLED) { - efx_start_interrupts(efx, false); + rc = efx_enable_interrupts(efx); + if (rc) + goto fail; mutex_lock(&efx->mac_lock); efx->phy_op->reconfigure(efx); @@ -2797,6 +3048,11 @@ static int efx_pm_thaw(struct device *dev) queue_work(reset_workqueue, &efx->reset_work); return 0; + +fail: + rtnl_unlock(); + + return rc; } static int efx_pm_poweroff(struct device *dev) @@ -2833,8 +3089,8 @@ static int efx_pm_resume(struct device *dev) rc = efx->type->init(efx); if (rc) return rc; - efx_pm_thaw(dev); - return 0; + rc = efx_pm_thaw(dev); + return rc; } static int efx_pm_suspend(struct device *dev) @@ -2857,12 +3113,112 @@ static const struct dev_pm_ops efx_pm_ops = { .restore = efx_pm_resume, }; +/* A PCI error affecting this device was detected. + * At this point MMIO and DMA may be disabled. + * Stop the software path and request a slot reset. + */ +static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev, + enum pci_channel_state state) +{ + pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; + struct efx_nic *efx = pci_get_drvdata(pdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + rtnl_lock(); + + if (efx->state != STATE_DISABLED) { + efx->state = STATE_RECOVERY; + efx->reset_pending = 0; + + efx_device_detach_sync(efx); + + efx_stop_all(efx); + efx_disable_interrupts(efx); + + status = PCI_ERS_RESULT_NEED_RESET; + } else { + /* If the interface is disabled we don't want to do anything + * with it. + */ + status = PCI_ERS_RESULT_RECOVERED; + } + + rtnl_unlock(); + + pci_disable_device(pdev); + + return status; +} + +/* Fake a successfull reset, which will be performed later in efx_io_resume. 
*/ +static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev) +{ + struct efx_nic *efx = pci_get_drvdata(pdev); + pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; + int rc; + + if (pci_enable_device(pdev)) { + netif_err(efx, hw, efx->net_dev, + "Cannot re-enable PCI device after reset.\n"); + status = PCI_ERS_RESULT_DISCONNECT; + } + + rc = pci_cleanup_aer_uncorrect_error_status(pdev); + if (rc) { + netif_err(efx, hw, efx->net_dev, + "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc); + /* Non-fatal error. Continue. */ + } + + return status; +} + +/* Perform the actual reset and resume I/O operations. */ +static void efx_io_resume(struct pci_dev *pdev) +{ + struct efx_nic *efx = pci_get_drvdata(pdev); + int rc; + + rtnl_lock(); + + if (efx->state == STATE_DISABLED) + goto out; + + rc = efx_reset(efx, RESET_TYPE_ALL); + if (rc) { + netif_err(efx, hw, efx->net_dev, + "efx_reset failed after PCI error (%d)\n", rc); + } else { + efx->state = STATE_READY; + netif_dbg(efx, hw, efx->net_dev, + "Done resetting and resuming IO after PCI error.\n"); + } + +out: + rtnl_unlock(); +} + +/* For simplicity and reliability, we always require a slot reset and try to + * reset the hardware when a pci error affecting the device is detected. + * We leave both the link_reset and mmio_enabled callback unimplemented: + * with our request for slot reset the mmio_enabled callback will never be + * called, and the link_reset callback is not used by AER or EEH mechanisms. + */ +static struct pci_error_handlers efx_err_handlers = { + .error_detected = efx_io_error_detected, + .slot_reset = efx_io_slot_reset, + .resume = efx_io_resume, +}; + static struct pci_driver efx_pci_driver = { .name = KBUILD_MODNAME, .id_table = efx_pci_table, .probe = efx_pci_probe, .remove = efx_pci_remove, .driver.pm = &efx_pm_ops, + .err_handler = &efx_err_handlers, }; /************************************************************************** @@ -2927,6 +3283,6 @@ module_exit(efx_exit_module); MODULE_AUTHOR("Solarflare Communications and " "Michael Brown <mbrown@fensystems.co.uk>"); -MODULE_DESCRIPTION("Solarflare Communications network driver"); +MODULE_DESCRIPTION("Solarflare network driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, efx_pci_table); |
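
The interrupt-probing hunk above replaces the old call-then-retry use of pci_enable_msix() with pci_enable_msix_range(), which allocates anywhere between a minimum and a maximum number of vectors in a single call. A minimal sketch of that fallback pattern, assuming the same 3.13-era PCI API; example_enable_msix is a hypothetical helper, not a driver function:

```c
#include <linux/pci.h>

/* Hypothetical helper mirroring the shape of efx_probe_interrupts():
 * request up to *n_channels MSI-X vectors, but accept as few as one.
 */
static int example_enable_msix(struct pci_dev *pdev,
			       struct msix_entry *xentries,
			       unsigned int *n_channels)
{
	unsigned int i;
	int rc;

	for (i = 0; i < *n_channels; i++)
		xentries[i].entry = i;

	rc = pci_enable_msix_range(pdev, xentries, 1, *n_channels);
	if (rc < 0)
		return rc;		/* no MSI-X; caller falls back to MSI */
	if (rc < *n_channels)
		*n_channels = rc;	/* short allocation: fewer channels */
	return 0;
}
```

The range variant turns "partial success, retry with fewer vectors" into an ordinary return value, which is why the rewritten hunk can drop the second pci_enable_msix() call.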
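The reset-scope arithmetic in efx_reset() also rewards a closer look: reset types are ordered so that a higher value implies a wider scope, so "pending &= -(1 << (method + 1))" clears every queued reset already covered by the one just performed (the new RESET_TYPE_MCDI_TIMEOUT sits outside that hierarchy, hence the __clear_bit() special case). A user-space sketch of the mask, with illustrative enum values rather than the driver's real RESET_TYPE_* numbering:

```c
#include <stdio.h>

/* Illustrative scope-ordered reset types; larger value = wider scope. */
enum { RESET_INVISIBLE, RESET_ALL, RESET_WORLD, RESET_DISABLE };

int main(void)
{
	unsigned long pending = (1UL << RESET_INVISIBLE) |
				(1UL << RESET_ALL) |
				(1UL << RESET_WORLD);
	int method = RESET_ALL;		/* reset that was just performed */

	/* -(1UL << (method + 1)) == ~((1UL << (method + 1)) - 1): ones in
	 * every bit above 'method', so the AND keeps only wider resets.
	 */
	pending &= -(1UL << (method + 1));

	printf("still pending: %#lx\n", pending);	/* 0x4 = RESET_WORLD */
	return 0;
}
```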
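Finally, the new BIST handling is a bounded poll: up to BIST_WAIT_DELAY_COUNT iterations of BIST_WAIT_DELAY_MS each (100 × 100 ms, a 10-second ceiling) waiting for the MC to reboot, after which the flag is cleared regardless so the reset can proceed. A sketch of that shape, assuming only msleep(); the predicate callback is hypothetical:

```c
#include <linux/delay.h>
#include <linux/types.h>

#define BIST_WAIT_DELAY_MS	100
#define BIST_WAIT_DELAY_COUNT	100	/* 100 * 100 ms = 10 s ceiling */

/* Hypothetical poller mirroring efx_wait_for_bist_end(): test a
 * completion predicate at a fixed interval with a hard upper bound.
 */
static bool example_wait_for_bist_end(bool (*rebooted)(void *), void *ctx)
{
	int i;

	for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
		if (rebooted(ctx))
			return true;
		msleep(BIST_WAIT_DELAY_MS);
	}
	return false;	/* timed out; caller still clears the BIST flag */
}
```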
