Diffstat (limited to 'drivers/net/ethernet/intel/igbvf/netdev.c')

 drivers/net/ethernet/intel/igbvf/netdev.c | 341
 1 file changed, 191 insertions(+), 150 deletions(-)
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 4e9141cfe81..d608599e123 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -47,7 +47,7 @@
 
 #include "igbvf.h"
 
-#define DRV_VERSION "2.0.1-k"
+#define DRV_VERSION "2.0.2-k"
 char igbvf_driver_name[] = "igbvf";
 const char igbvf_driver_version[] = DRV_VERSION;
 static const char igbvf_driver_string[] =
@@ -55,6 +55,11 @@ static const char igbvf_driver_string[] =
 static const char igbvf_copyright[] =
     "Copyright (c) 2009 - 2012 Intel Corporation.";
 
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
 static int igbvf_poll(struct napi_struct *napi, int budget);
 static void igbvf_reset(struct igbvf_adapter *);
 static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
@@ -102,12 +107,19 @@ static void igbvf_receive_skb(struct igbvf_adapter *adapter,
                               struct sk_buff *skb,
                               u32 status, u16 vlan)
 {
+    u16 vid;
+
     if (status & E1000_RXD_STAT_VP) {
-        u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
+        if ((adapter->flags & IGBVF_FLAG_RX_LB_VLAN_BSWAP) &&
+            (status & E1000_RXDEXT_STATERR_LB))
+            vid = be16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
+        else
+            vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
         if (test_bit(vid, adapter->active_vlans))
-            __vlan_hwaccel_put_tag(skb, vid);
+            __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
     }
-    netif_receive_skb(skb);
+
+    napi_gro_receive(&adapter->rx_ring->napi, skb);
 }
 
 static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
@@ -179,6 +191,13 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
                                                buffer_info->page_offset,
                                                PAGE_SIZE / 2,
                                                DMA_FROM_DEVICE);
+            if (dma_mapping_error(&pdev->dev,
+                                  buffer_info->page_dma)) {
+                __free_page(buffer_info->page);
+                buffer_info->page = NULL;
+                dev_err(&pdev->dev, "RX DMA map failed\n");
+                break;
+            }
         }
 
         if (!buffer_info->skb) {
@@ -192,6 +211,12 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
             buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
                                               bufsz,
                                               DMA_FROM_DEVICE);
+            if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+                dev_kfree_skb(buffer_info->skb);
+                buffer_info->skb = NULL;
+                dev_err(&pdev->dev, "RX DMA map failed\n");
+                goto no_buffers;
+            }
         }
         /* Refresh the desc even if buffer_addrs didn't change because
          * each write-back erases this info. */
@@ -422,7 +447,6 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
 
     tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
                                        &tx_ring->dma, GFP_KERNEL);
-
     if (!tx_ring->desc)
         goto err;
 
@@ -463,7 +487,6 @@ int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
 
     rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
                                        &rx_ring->dma, GFP_KERNEL);
-
     if (!rx_ring->desc)
         goto err;
 
@@ -632,14 +655,13 @@ void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
  * traffic pattern. Constants in this function were computed
  * based on theoretical maximum wire speed and thresholds were set based
  * on testing data as well as attempting to minimize response time
- * while increasing bulk throughput. This functionality is controlled
- * by the InterruptThrottleRate module parameter.
+ * while increasing bulk throughput.
 **/
-static unsigned int igbvf_update_itr(struct igbvf_adapter *adapter,
-                                     u16 itr_setting, int packets,
-                                     int bytes)
+static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter,
+                                           enum latency_range itr_setting,
+                                           int packets, int bytes)
 {
-    unsigned int retval = itr_setting;
+    enum latency_range retval = itr_setting;
 
     if (packets == 0)
         goto update_itr_done;
@@ -675,71 +697,94 @@ static unsigned int igbvf_update_itr(struct igbvf_adapter *adapter,
             retval = low_latency;
         }
         break;
+    default:
+        break;
     }
 
 update_itr_done:
     return retval;
 }
 
-static void igbvf_set_itr(struct igbvf_adapter *adapter)
+static int igbvf_range_to_itr(enum latency_range current_range)
 {
-    struct e1000_hw *hw = &adapter->hw;
-    u16 current_itr;
-    u32 new_itr = adapter->itr;
-
-    adapter->tx_itr = igbvf_update_itr(adapter, adapter->tx_itr,
-                                       adapter->total_tx_packets,
-                                       adapter->total_tx_bytes);
-    /* conservative mode (itr 3) eliminates the lowest_latency setting */
-    if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
-        adapter->tx_itr = low_latency;
-
-    adapter->rx_itr = igbvf_update_itr(adapter, adapter->rx_itr,
-                                       adapter->total_rx_packets,
-                                       adapter->total_rx_bytes);
-    /* conservative mode (itr 3) eliminates the lowest_latency setting */
-    if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
-        adapter->rx_itr = low_latency;
+    int new_itr;
 
-    current_itr = max(adapter->rx_itr, adapter->tx_itr);
-
-    switch (current_itr) {
+    switch (current_range) {
     /* counts and packets in update_itr are dependent on these numbers */
     case lowest_latency:
-        new_itr = 70000;
+        new_itr = IGBVF_70K_ITR;
         break;
     case low_latency:
-        new_itr = 20000; /* aka hwitr = ~200 */
+        new_itr = IGBVF_20K_ITR;
        break;
     case bulk_latency:
-        new_itr = 4000;
+        new_itr = IGBVF_4K_ITR;
         break;
     default:
+        new_itr = IGBVF_START_ITR;
         break;
     }
+    return new_itr;
+}
+
+static void igbvf_set_itr(struct igbvf_adapter *adapter)
+{
+    u32 new_itr;
+
+    adapter->tx_ring->itr_range =
+            igbvf_update_itr(adapter,
+                             adapter->tx_ring->itr_val,
+                             adapter->total_tx_packets,
+                             adapter->total_tx_bytes);
+
+    /* conservative mode (itr 3) eliminates the lowest_latency setting */
+    if (adapter->requested_itr == 3 &&
+        adapter->tx_ring->itr_range == lowest_latency)
+        adapter->tx_ring->itr_range = low_latency;
+
+    new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range);
+
-    if (new_itr != adapter->itr) {
+    if (new_itr != adapter->tx_ring->itr_val) {
+        u32 current_itr = adapter->tx_ring->itr_val;
         /*
          * this attempts to bias the interrupt rate towards Bulk
          * by adding intermediate steps when interrupt rate is
          * increasing
          */
-        new_itr = new_itr > adapter->itr ?
-                  min(adapter->itr + (new_itr >> 2), new_itr) :
-                  new_itr;
-        adapter->itr = new_itr;
-        adapter->rx_ring->itr_val = 1952;
-
-        if (adapter->msix_entries)
-            adapter->rx_ring->set_itr = 1;
-        else
-            ew32(ITR, 1952);
+        new_itr = new_itr > current_itr ?
+                  min(current_itr + (new_itr >> 2), new_itr) :
+                  new_itr;
+        adapter->tx_ring->itr_val = new_itr;
+
+        adapter->tx_ring->set_itr = 1;
+    }
+
+    adapter->rx_ring->itr_range =
+            igbvf_update_itr(adapter, adapter->rx_ring->itr_val,
+                             adapter->total_rx_packets,
+                             adapter->total_rx_bytes);
+    if (adapter->requested_itr == 3 &&
+        adapter->rx_ring->itr_range == lowest_latency)
+        adapter->rx_ring->itr_range = low_latency;
+
+    new_itr = igbvf_range_to_itr(adapter->rx_ring->itr_range);
+
+    if (new_itr != adapter->rx_ring->itr_val) {
+        u32 current_itr = adapter->rx_ring->itr_val;
+        new_itr = new_itr > current_itr ?
+                  min(current_itr + (new_itr >> 2), new_itr) :
+                  new_itr;
+        adapter->rx_ring->itr_val = new_itr;
+
+        adapter->rx_ring->set_itr = 1;
     }
 }
 
 /**
  * igbvf_clean_tx_irq - Reclaim resources after transmit completes
  * @adapter: board private structure
+ *
  * returns true if ring is completely cleaned
  **/
 static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
@@ -750,20 +795,31 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
     struct sk_buff *skb;
     union e1000_adv_tx_desc *tx_desc, *eop_desc;
     unsigned int total_bytes = 0, total_packets = 0;
-    unsigned int i, eop, count = 0;
+    unsigned int i, count = 0;
     bool cleaned = false;
 
     i = tx_ring->next_to_clean;
-    eop = tx_ring->buffer_info[i].next_to_watch;
-    eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
+    buffer_info = &tx_ring->buffer_info[i];
+    eop_desc = buffer_info->next_to_watch;
+
+    do {
+        /* if next_to_watch is not set then there is no work pending */
+        if (!eop_desc)
+            break;
+
+        /* prevent any other reads prior to eop_desc */
+        read_barrier_depends();
+
+        /* if DD is not set pending work has not been completed */
+        if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
+            break;
+
+        /* clear next_to_watch to prevent false hangs */
+        buffer_info->next_to_watch = NULL;
 
-    while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
-           (count < tx_ring->count)) {
-        rmb();  /* read buffer_info after eop_desc status */
         for (cleaned = false; !cleaned; count++) {
             tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
-            buffer_info = &tx_ring->buffer_info[i];
-            cleaned = (i == eop);
+            cleaned = (tx_desc == eop_desc);
             skb = buffer_info->skb;
 
             if (skb) {
@@ -784,10 +840,12 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
             i++;
             if (i == tx_ring->count)
                 i = 0;
+
+            buffer_info = &tx_ring->buffer_info[i];
         }
-        eop = tx_ring->buffer_info[i].next_to_watch;
-        eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
-    }
+
+        eop_desc = buffer_info->next_to_watch;
+    } while (count < tx_ring->count);
 
     tx_ring->next_to_clean = i;
 
@@ -835,6 +893,11 @@ static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
     struct e1000_hw *hw = &adapter->hw;
     struct igbvf_ring *tx_ring = adapter->tx_ring;
 
+    if (tx_ring->set_itr) {
+        writel(tx_ring->itr_val,
+               adapter->hw.hw_addr + tx_ring->itr_register);
+        adapter->tx_ring->set_itr = 0;
+    }
 
     adapter->total_tx_bytes = 0;
     adapter->total_tx_packets = 0;
@@ -937,19 +1000,10 @@ static void igbvf_configure_msix(struct igbvf_adapter *adapter)
 
     igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++);
     adapter->eims_enable_mask |= tx_ring->eims_value;
-    if (tx_ring->itr_val)
-        writel(tx_ring->itr_val,
-               hw->hw_addr + tx_ring->itr_register);
-    else
-        writel(1952, hw->hw_addr + tx_ring->itr_register);
-
+    writel(tx_ring->itr_val, hw->hw_addr + tx_ring->itr_register);
     igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++);
     adapter->eims_enable_mask |= rx_ring->eims_value;
-    if (rx_ring->itr_val)
-        writel(rx_ring->itr_val,
-               hw->hw_addr + rx_ring->itr_register);
-    else
-        writel(1952, hw->hw_addr + rx_ring->itr_register);
+    writel(rx_ring->itr_val, hw->hw_addr + rx_ring->itr_register);
 
     /* set vector for other causes, i.e. link changes */
@@ -989,11 +1043,11 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
         for (i = 0; i < 3; i++)
             adapter->msix_entries[i].entry = i;
 
-        err = pci_enable_msix(adapter->pdev,
-                              adapter->msix_entries, 3);
+        err = pci_enable_msix_range(adapter->pdev,
+                                    adapter->msix_entries, 3, 3);
     }
 
-    if (err) {
+    if (err < 0) {
         /* MSI-X failed */
         dev_err(&adapter->pdev->dev,
                 "Failed to initialize MSI-X interrupts.\n");
@@ -1027,7 +1081,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
         goto out;
 
     adapter->tx_ring->itr_register = E1000_EITR(vector);
-    adapter->tx_ring->itr_val = 1952;
+    adapter->tx_ring->itr_val = adapter->current_itr;
     vector++;
 
     err = request_irq(adapter->msix_entries[vector].vector,
@@ -1037,7 +1091,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
         goto out;
 
     adapter->rx_ring->itr_register = E1000_EITR(vector);
-    adapter->rx_ring->itr_val = 1952;
+    adapter->rx_ring->itr_val = adapter->current_itr;
     vector++;
 
     err = request_irq(adapter->msix_entries[vector].vector,
@@ -1055,7 +1109,7 @@ out:
  * igbvf_alloc_queues - Allocate memory for all rings
  * @adapter: board private structure to initialize
 **/
-static int __devinit igbvf_alloc_queues(struct igbvf_adapter *adapter)
+static int igbvf_alloc_queues(struct igbvf_adapter *adapter)
 {
     struct net_device *netdev = adapter->netdev;
 
@@ -1151,7 +1205,7 @@ static int igbvf_poll(struct napi_struct *napi, int budget)
     if (work_done < budget) {
         napi_complete(napi);
 
-        if (adapter->itr_setting & 3)
+        if (adapter->requested_itr & 3)
             igbvf_set_itr(adapter);
 
         if (!test_bit(__IGBVF_DOWN, &adapter->state))
@@ -1176,7 +1230,8 @@ static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
     e1000_rlpml_set_vf(hw, max_frame_size);
 }
 
-static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int igbvf_vlan_rx_add_vid(struct net_device *netdev,
+                                 __be16 proto, u16 vid)
 {
     struct igbvf_adapter *adapter = netdev_priv(netdev);
     struct e1000_hw *hw = &adapter->hw;
@@ -1189,7 +1244,8 @@ static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
     return 0;
 }
 
-static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int igbvf_vlan_rx_kill_vid(struct net_device *netdev,
+                                  __be16 proto, u16 vid)
 {
     struct igbvf_adapter *adapter = netdev_priv(netdev);
     struct e1000_hw *hw = &adapter->hw;
@@ -1208,7 +1264,7 @@ static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
     u16 vid;
 
     for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
-        igbvf_vlan_rx_add_vid(adapter->netdev, vid);
+        igbvf_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
 }
 
 /**
@@ -1356,12 +1412,10 @@ static void igbvf_set_multi(struct net_device *netdev)
     int i;
 
     if (!netdev_mc_empty(netdev)) {
-        mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
-        if (!mta_list) {
-            dev_err(&adapter->pdev->dev,
-                    "failed to allocate multicast filter list\n");
+        mta_list = kmalloc_array(netdev_mc_count(netdev), ETH_ALEN,
+                                 GFP_ATOMIC);
+        if (!mta_list)
             return;
-        }
     }
 
     /* prepare a packed array of only addresses. */
@@ -1507,7 +1561,7 @@ void igbvf_reinit_locked(struct igbvf_adapter *adapter)
  * Fields are initialized based on PCI device information and
  * OS network device settings (MTU size).
 **/
-static int __devinit igbvf_sw_init(struct igbvf_adapter *adapter)
+static int igbvf_sw_init(struct igbvf_adapter *adapter)
 {
     struct net_device *netdev = adapter->netdev;
     s32 rc;
@@ -1521,8 +1575,8 @@ static int __devinit igbvf_sw_init(struct igbvf_adapter *adapter)
     adapter->tx_abs_int_delay = 32;
     adapter->rx_int_delay = 0;
     adapter->rx_abs_int_delay = 8;
-    adapter->itr_setting = 3;
-    adapter->itr = 20000;
+    adapter->requested_itr = 3;
+    adapter->current_itr = IGBVF_START_ITR;
 
     /* Set various function pointers */
     adapter->ei->init_ops(&adapter->hw);
@@ -1691,7 +1745,7 @@ static int igbvf_set_mac(struct net_device *netdev, void *p)
 
     hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
 
-    if (memcmp(addr->sa_data, hw->mac.addr, 6))
+    if (!ether_addr_equal(addr->sa_data, hw->mac.addr))
         return -EADDRNOTAVAIL;
 
     memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
@@ -1856,20 +1910,18 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
                      struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
 {
     struct e1000_adv_tx_context_desc *context_desc;
-    unsigned int i;
-    int err;
     struct igbvf_buffer *buffer_info;
     u32 info = 0, tu_cmd = 0;
     u32 mss_l4len_idx, l4len;
+    unsigned int i;
+    int err;
+
     *hdr_len = 0;
 
-    if (skb_header_cloned(skb)) {
-        err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-        if (err) {
-            dev_err(&adapter->pdev->dev,
-                    "igbvf_tso returning an error\n");
-            return err;
-        }
+    err = skb_cow_head(skb, 0);
+    if (err < 0) {
+        dev_err(&adapter->pdev->dev, "igbvf_tso returning an error\n");
+        return err;
     }
 
     l4len = tcp_hdrlen(skb);
@@ -1920,7 +1972,6 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
     context_desc->seqnum_seed = 0;
 
     buffer_info->time_stamp = jiffies;
-    buffer_info->next_to_watch = i;
     buffer_info->dma = 0;
     i++;
     if (i == tx_ring->count)
@@ -1961,12 +2012,12 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
 
     if (skb->ip_summed == CHECKSUM_PARTIAL) {
         switch (skb->protocol) {
-        case __constant_htons(ETH_P_IP):
+        case htons(ETH_P_IP):
             tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
             if (ip_hdr(skb)->protocol == IPPROTO_TCP)
                 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
             break;
-        case __constant_htons(ETH_P_IPV6):
+        case htons(ETH_P_IPV6):
             if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
                 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
             break;
@@ -1980,7 +2031,6 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
     context_desc->mss_l4len_idx = 0;
 
     buffer_info->time_stamp = jiffies;
-    buffer_info->next_to_watch = i;
     buffer_info->dma = 0;
     i++;
     if (i == tx_ring->count)
@@ -2020,8 +2070,7 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
 
 static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
                                    struct igbvf_ring *tx_ring,
-                                   struct sk_buff *skb,
-                                   unsigned int first)
+                                   struct sk_buff *skb)
 {
     struct igbvf_buffer *buffer_info;
     struct pci_dev *pdev = adapter->pdev;
@@ -2036,7 +2085,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
     buffer_info->length = len;
     /* set time_stamp *before* dma to help avoid a possible race */
     buffer_info->time_stamp = jiffies;
-    buffer_info->next_to_watch = i;
     buffer_info->mapped_as_page = false;
     buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
                                       DMA_TO_DEVICE);
@@ -2059,7 +2107,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
         BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
         buffer_info->length = len;
         buffer_info->time_stamp = jiffies;
-        buffer_info->next_to_watch = i;
         buffer_info->mapped_as_page = true;
         buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
                                             DMA_TO_DEVICE);
@@ -2068,7 +2115,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
     }
 
     tx_ring->buffer_info[i].skb = skb;
-    tx_ring->buffer_info[first].next_to_watch = i;
 
     return ++count;
 
@@ -2079,7 +2125,6 @@ dma_error:
     buffer_info->dma = 0;
     buffer_info->time_stamp = 0;
     buffer_info->length = 0;
-    buffer_info->next_to_watch = 0;
     buffer_info->mapped_as_page = false;
     if (count)
         count--;
@@ -2098,7 +2143,8 @@ dma_error:
 
 static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
                                       struct igbvf_ring *tx_ring,
-                                      int tx_flags, int count, u32 paylen,
+                                      int tx_flags, int count,
+                                      unsigned int first, u32 paylen,
                                       u8 hdr_len)
 {
     union e1000_adv_tx_desc *tx_desc = NULL;
@@ -2148,6 +2194,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
      * such as IA-64). */
     wmb();
 
+    tx_ring->buffer_info[first].next_to_watch = tx_desc;
     tx_ring->next_to_use = i;
     writel(i, adapter->hw.hw_addr + tx_ring->tail);
     /* we need this if more than one processor can write to our tail
@@ -2214,11 +2261,11 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
      * count reflects descriptors mapped, if 0 then mapping error
      * has occurred and we need to rewind the descriptor queue
      */
-    count = igbvf_tx_map_adv(adapter, tx_ring, skb, first);
+    count = igbvf_tx_map_adv(adapter, tx_ring, skb);
 
     if (count) {
         igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
-                           skb->len, hdr_len);
+                           first, skb->len, hdr_len);
         /* Make sure there is space in the ring for the next send. */
         igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
     } else {
@@ -2294,10 +2341,9 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
     struct igbvf_adapter *adapter = netdev_priv(netdev);
     int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
-    if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
-        dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
+    if (new_mtu < 68 || new_mtu > INT_MAX - ETH_HLEN - ETH_FCS_LEN ||
+        max_frame > MAX_JUMBO_FRAME_SIZE)
         return -EINVAL;
-    }
 
 #define MAX_STD_JUMBO_FRAME_SIZE 9234
     if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
@@ -2574,8 +2620,7 @@ static const struct net_device_ops igbvf_netdev_ops = {
  * The OS initialization, configuring of the adapter private structure,
  * and a hardware reset occur.
 **/
-static int __devinit igbvf_probe(struct pci_dev *pdev,
-                                 const struct pci_device_id *ent)
+static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
     struct net_device *netdev;
     struct igbvf_adapter *adapter;
@@ -2590,21 +2635,15 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
         return err;
 
     pci_using_dac = 0;
-    err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+    err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
     if (!err) {
-        err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-        if (!err)
-            pci_using_dac = 1;
+        pci_using_dac = 1;
     } else {
-        err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
         if (err) {
-            err = dma_set_coherent_mask(&pdev->dev,
-                                        DMA_BIT_MASK(32));
-            if (err) {
-                dev_err(&pdev->dev, "No usable DMA "
-                        "configuration, aborting\n");
-                goto err_dma;
-            }
+            dev_err(&pdev->dev, "No usable DMA "
+                    "configuration, aborting\n");
+            goto err_dma;
         }
     }
 
@@ -2631,7 +2670,7 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
     adapter->flags = ei->flags;
     adapter->hw.back = adapter;
     adapter->hw.mac.type = ei->mac;
-    adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
+    adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 
     /* PCI config space info */
 
@@ -2651,7 +2690,7 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
     if (ei->get_variants) {
         err = ei->get_variants(adapter);
         if (err)
-            goto err_ioremap;
+            goto err_get_variants;
     }
 
     /* setup adapter struct */
@@ -2676,9 +2715,9 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
                         NETIF_F_RXCSUM;
 
     netdev->features = netdev->hw_features |
-                       NETIF_F_HW_VLAN_TX |
-                       NETIF_F_HW_VLAN_RX |
-                       NETIF_F_HW_VLAN_FILTER;
+                       NETIF_F_HW_VLAN_CTAG_TX |
+                       NETIF_F_HW_VLAN_CTAG_RX |
+                       NETIF_F_HW_VLAN_CTAG_FILTER;
 
     if (pci_using_dac)
         netdev->features |= NETIF_F_HIGHDMA;
@@ -2693,25 +2732,22 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
     err = hw->mac.ops.reset_hw(hw);
     if (err) {
         dev_info(&pdev->dev,
-                 "PF still in reset state, assigning new address."
-                 " Is the PF interface up?\n");
-        dev_hw_addr_random(adapter->netdev, hw->mac.addr);
+                 "PF still in reset state. Is the PF interface up?\n");
     } else {
         err = hw->mac.ops.read_mac_addr(hw);
-        if (err) {
-            dev_err(&pdev->dev, "Error reading MAC address\n");
-            goto err_hw_init;
-        }
+        if (err)
+            dev_info(&pdev->dev, "Error reading MAC address.\n");
+        else if (is_zero_ether_addr(adapter->hw.mac.addr))
+            dev_info(&pdev->dev, "MAC address not assigned by administrator.\n");
+        memcpy(netdev->dev_addr, adapter->hw.mac.addr,
+               netdev->addr_len);
     }
 
-    memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
-    memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
-
-    if (!is_valid_ether_addr(netdev->perm_addr)) {
-        dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
-                netdev->dev_addr);
-        err = -EIO;
-        goto err_hw_init;
+    if (!is_valid_ether_addr(netdev->dev_addr)) {
+        dev_info(&pdev->dev, "Assigning random MAC address.\n");
+        eth_hw_addr_random(netdev);
+        memcpy(adapter->hw.mac.addr, netdev->dev_addr,
+               netdev->addr_len);
     }
 
     setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
@@ -2727,6 +2763,10 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
     /* reset the hardware with the new settings */
     igbvf_reset(adapter);
 
+    /* set hardware-specific flags */
+    if (adapter->hw.mac.type == e1000_vfadapt_i350)
+        adapter->flags |= IGBVF_FLAG_RX_LB_VLAN_BSWAP;
+
     strcpy(netdev->name, "eth%d");
     err = register_netdev(netdev);
     if (err)
@@ -2747,6 +2787,7 @@ err_hw_init:
     kfree(adapter->rx_ring);
 err_sw_init:
     igbvf_reset_interrupt_capability(adapter);
+err_get_variants:
     iounmap(adapter->hw.hw_addr);
 err_ioremap:
     free_netdev(netdev);
@@ -2767,7 +2808,7 @@ err_dma:
  * Hot-Plug event, or because the driver is going to be removed from
  * memory.
 **/
-static void __devexit igbvf_remove(struct pci_dev *pdev)
+static void igbvf_remove(struct pci_dev *pdev)
 {
     struct net_device *netdev = pci_get_drvdata(pdev);
     struct igbvf_adapter *adapter = netdev_priv(netdev);
@@ -2806,7 +2847,7 @@ static void __devexit igbvf_remove(struct pci_dev *pdev)
 }
 
 /* PCI Error Recovery (ERS) */
-static struct pci_error_handlers igbvf_err_handler = {
+static const struct pci_error_handlers igbvf_err_handler = {
     .error_detected = igbvf_io_error_detected,
     .slot_reset = igbvf_io_slot_reset,
     .resume = igbvf_io_resume,
@@ -2824,7 +2865,7 @@ static struct pci_driver igbvf_driver = {
     .name = igbvf_driver_name,
     .id_table = igbvf_pci_tbl,
     .probe = igbvf_probe,
-    .remove = __devexit_p(igbvf_remove),
+    .remove = igbvf_remove,
 #ifdef CONFIG_PM
     /* Power Management Hooks */
     .suspend = igbvf_suspend,
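
Note: the central change in this diff is the tx-completion rework. buffer_info->next_to_watch becomes a pointer to the end-of-packet (EOP) descriptor instead of a ring index; igbvf_tx_queue_adv() publishes it only after the wmb(), and igbvf_clean_tx_irq() clears it before reclaiming, so a NULL value cleanly means "no work pending" and a stale index can no longer trigger false hang detection. The following is a minimal userspace sketch of just that pattern; struct desc, struct buf_info and ring_clean() are simplified stand-ins invented for illustration, and the kernel's memory barriers are omitted.

/* Sketch of the descriptor-pointer completion scheme (not driver code). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8
#define STAT_DD   0x1                    /* "descriptor done" write-back bit */

struct desc { uint32_t status; };

struct buf_info {
    struct desc *next_to_watch;          /* EOP descriptor, NULL = no work */
};

struct ring {
    struct desc desc[RING_SIZE];
    struct buf_info info[RING_SIZE];
    unsigned int next_to_clean;
};

static unsigned int ring_clean(struct ring *r)
{
    unsigned int i = r->next_to_clean, count = 0;
    struct buf_info *bi = &r->info[i];
    struct desc *eop = bi->next_to_watch;

    do {
        if (!eop)                        /* nothing pending */
            break;
        if (!(eop->status & STAT_DD))    /* hardware not done yet */
            break;
        bi->next_to_watch = NULL;        /* prevent false hangs */

        /* walk descriptors up to and including the EOP one */
        for (bool cleaned = false; !cleaned; count++) {
            cleaned = (&r->desc[i] == eop);
            if (++i == RING_SIZE)
                i = 0;
            bi = &r->info[i];
        }
        eop = bi->next_to_watch;
    } while (count < RING_SIZE);

    r->next_to_clean = i;
    return count;
}

int main(void)
{
    struct ring r = { .next_to_clean = 0 };

    /* pretend a 3-descriptor packet was posted and completed by hardware */
    r.info[0].next_to_watch = &r.desc[2];
    r.desc[2].status = STAT_DD;

    printf("cleaned %u descriptors\n", ring_clean(&r));  /* prints 3 */
    return 0;
}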
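Note: the reworked igbvf_set_itr() applies the same smoothing rule to each ring. When the computed target ITR is above the current value, it steps up by a quarter of the target per adjustment (capped at the target), while a lower target takes effect immediately; this is the "bias towards Bulk" the in-code comment describes. A small standalone example of that arithmetic, using the pre-rework interrupts-per-second style values (4000 and 20000) purely for readability:

/* Illustrative only: itr_step() mirrors the ternary expression in the diff. */
#include <stdio.h>

static unsigned int itr_step(unsigned int cur, unsigned int target)
{
    /* same shape as: new > cur ? min(cur + (new >> 2), new) : new */
    return target > cur
        ? (cur + (target >> 2) < target ? cur + (target >> 2) : target)
        : target;
}

int main(void)
{
    unsigned int itr = 4000;             /* e.g. bulk_latency */
    const unsigned int target = 20000;   /* e.g. low_latency */

    while (itr != target) {
        itr = itr_step(itr, target);
        printf("%u\n", itr);             /* 9000, 14000, 19000, 20000 */
    }
    return 0;
}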
