Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r--  drivers/net/ixgbe/ixgbe.h          |   3
-rw-r--r--  drivers/net/ixgbe/ixgbe_82599.c    |   7
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.c   |  16
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.h   |   5
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c  | 138
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c     | 602
-rw-r--r--  drivers/net/ixgbe/ixgbe_phy.c      |   4
-rw-r--r--  drivers/net/ixgbe/ixgbe_sriov.c    | 137
-rw-r--r--  drivers/net/ixgbe/ixgbe_sriov.h    |   8
-rw-r--r--  drivers/net/ixgbe/ixgbe_type.h     |   8
10 files changed, 746 insertions(+), 182 deletions(-)
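
Much of the mechanical churn below (chiefly in ixgbe_main.c and ixgbe_ethtool.c) is the conversion from the legacy pci_* DMA wrappers to the generic DMA API: pci_map_single(pdev, ...) becomes dma_map_single(&pdev->dev, ...), the PCI_DMA_TODEVICE/PCI_DMA_FROMDEVICE directions become DMA_TO_DEVICE/DMA_FROM_DEVICE, and pci_alloc_consistent() becomes dma_alloc_coherent(), which takes an explicit gfp_t where the PCI wrapper hard-coded GFP_ATOMIC. A minimal sketch of the resulting pattern, with a hypothetical helper name (not from the driver):

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/*
 * Hypothetical helper showing the converted mapping pattern: map a TX
 * buffer for device access and check for failure.  The pre-patch code
 * would have used pci_map_single(pdev, data, len, PCI_DMA_TODEVICE)
 * and pci_dma_mapping_error(pdev, dma).
 */
static int example_map_tx_buffer(struct pci_dev *pdev, void *data,
                                 size_t len, dma_addr_t *dma)
{
        *dma = dma_map_single(&pdev->dev, data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(&pdev->dev, *dma))
                return -ENOMEM;
        return 0;
}

The coherent allocations follow the same substitution: dma_alloc_coherent(&pdev->dev, size, &dma, GFP_KERNEL) replaces pci_alloc_consistent(pdev, size, &dma), and dma_free_coherent() replaces pci_free_consistent().
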
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 79c35ae3718..d0ea3d6dea9 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -111,7 +111,10 @@ struct vf_data_storage { u16 default_vf_vlan_id; u16 vlans_enabled; bool clear_to_send; + bool pf_set_mac; int rar; + u16 pf_vlan; /* When set, guest VLAN config not allowed. */ + u16 pf_qos; }; /* wrapper around a pointer to a socket buffer, diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index 12fc0e7ba2c..38c384031c4 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c @@ -642,6 +642,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, s32 i, j; bool link_up = false; u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + struct ixgbe_adapter *adapter = hw->back; hw_dbg(hw, "ixgbe_setup_mac_link_smartspeed.\n"); @@ -726,6 +727,10 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, autoneg_wait_to_complete); out: + if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) + netif_info(adapter, hw, adapter->netdev, "Smartspeed has" + " downgraded the link speed from the maximum" + " advertised\n"); return status; } @@ -1303,7 +1308,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) } if (i >= IXGBE_FDIRCMD_CMD_POLL) { hw_dbg(hw ,"Flow Director previous command isn't complete, " - "aborting table re-initialization. \n"); + "aborting table re-initialization.\n"); return IXGBE_ERR_FDIR_REINIT_FAILED; } diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index eb49020903c..6eb5814ca7d 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c @@ -1484,26 +1484,24 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) /** * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses * @hw: pointer to hardware structure - * @mc_addr_list: the list of new multicast addresses - * @mc_addr_count: number of addresses - * @next: iterator function to walk the multicast address list + * @netdev: pointer to net device structure * * The given list replaces any existing list. Clears the MC addrs from receive * address registers and the multicast table. Uses unused receive address * registers for the first multicast addresses, and hashes the rest into the * multicast table. **/ -s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, - u32 mc_addr_count, ixgbe_mc_addr_itr next) +s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, + struct net_device *netdev) { + struct netdev_hw_addr *ha; u32 i; - u32 vmdq; /* * Set the new number of MC addresses that we are being requested to * use. 
*/ - hw->addr_ctrl.num_mc_addrs = mc_addr_count; + hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev); hw->addr_ctrl.mta_in_use = 0; /* Clear the MTA */ @@ -1512,9 +1510,9 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); /* Add the new addresses */ - for (i = 0; i < mc_addr_count; i++) { + netdev_for_each_mc_addr(ha, netdev) { hw_dbg(hw, " Adding the multicast addresses:\n"); - ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq)); + ixgbe_set_mta(hw, ha->addr); } /* Enable mta */ diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h index 13606d4809c..264eef575cd 100644 --- a/drivers/net/ixgbe/ixgbe_common.h +++ b/drivers/net/ixgbe/ixgbe_common.h @@ -56,9 +56,8 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, u32 enable_addr); s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index); s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); -s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, - u32 mc_addr_count, - ixgbe_mc_addr_itr func); +s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, + struct net_device *netdev); s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, struct net_device *netdev); s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 8f461d5cee7..dc7fd5b70bc 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -365,7 +365,7 @@ static int ixgbe_set_pauseparam(struct net_device *netdev, else fc.disable_fc_autoneg = false; - if (pause->rx_pause && pause->tx_pause) + if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) fc.requested_mode = ixgbe_fc_full; else if (pause->rx_pause && !pause->tx_pause) fc.requested_mode = ixgbe_fc_rx_pause; @@ -1458,8 +1458,8 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) struct ixgbe_tx_buffer *buf = &(tx_ring->tx_buffer_info[i]); if (buf->dma) - pci_unmap_single(pdev, buf->dma, buf->length, - PCI_DMA_TODEVICE); + dma_unmap_single(&pdev->dev, buf->dma, + buf->length, DMA_TO_DEVICE); if (buf->skb) dev_kfree_skb(buf->skb); } @@ -1470,22 +1470,22 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) struct ixgbe_rx_buffer *buf = &(rx_ring->rx_buffer_info[i]); if (buf->dma) - pci_unmap_single(pdev, buf->dma, + dma_unmap_single(&pdev->dev, buf->dma, IXGBE_RXBUFFER_2048, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); if (buf->skb) dev_kfree_skb(buf->skb); } } if (tx_ring->desc) { - pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, - tx_ring->dma); + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); tx_ring->desc = NULL; } if (rx_ring->desc) { - pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, - rx_ring->dma); + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); rx_ring->desc = NULL; } @@ -1520,8 +1520,9 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); tx_ring->size = ALIGN(tx_ring->size, 4096); - if (!(tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, - &tx_ring->dma))) { + tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!(tx_ring->desc)) { ret_val = 2; goto err_nomem; } @@ -1563,8 +1564,8 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) tx_ring->tx_buffer_info[i].skb = skb; tx_ring->tx_buffer_info[i].length 
= skb->len; tx_ring->tx_buffer_info[i].dma = - pci_map_single(pdev, skb->data, skb->len, - PCI_DMA_TODEVICE); + dma_map_single(&pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); desc->read.buffer_addr = cpu_to_le64(tx_ring->tx_buffer_info[i].dma); desc->read.cmd_type_len = cpu_to_le32(skb->len); @@ -1593,8 +1594,9 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096); - if (!(rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, - &rx_ring->dma))) { + rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!(rx_ring->desc)) { ret_val = 5; goto err_nomem; } @@ -1661,8 +1663,8 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) skb_reserve(skb, NET_IP_ALIGN); rx_ring->rx_buffer_info[i].skb = skb; rx_ring->rx_buffer_info[i].dma = - pci_map_single(pdev, skb->data, IXGBE_RXBUFFER_2048, - PCI_DMA_FROMDEVICE); + dma_map_single(&pdev->dev, skb->data, + IXGBE_RXBUFFER_2048, DMA_FROM_DEVICE); rx_desc->read.pkt_addr = cpu_to_le64(rx_ring->rx_buffer_info[i].dma); memset(skb->data, 0x00, skb->len); @@ -1775,10 +1777,10 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) ixgbe_create_lbtest_frame( tx_ring->tx_buffer_info[k].skb, 1024); - pci_dma_sync_single_for_device(pdev, + dma_sync_single_for_device(&pdev->dev, tx_ring->tx_buffer_info[k].dma, tx_ring->tx_buffer_info[k].length, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); if (unlikely(++k == tx_ring->count)) k = 0; } @@ -1789,10 +1791,10 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) good_cnt = 0; do { /* receive the sent packets */ - pci_dma_sync_single_for_cpu(pdev, + dma_sync_single_for_cpu(&pdev->dev, rx_ring->rx_buffer_info[l].dma, IXGBE_RXBUFFER_2048, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); ret_val = ixgbe_check_lbtest_frame( rx_ring->rx_buffer_info[l].skb, 1024); if (!ret_val) @@ -2079,12 +2081,32 @@ static int ixgbe_get_coalesce(struct net_device *netdev, return 0; } +/* + * this function must be called before setting the new value of + * rx_itr_setting + */ +static bool ixgbe_reenable_rsc(struct ixgbe_adapter *adapter, + struct ethtool_coalesce *ec) +{ + /* check the old value and enable RSC if necessary */ + if ((adapter->rx_itr_setting == 0) && + (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) { + adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; + adapter->netdev->features |= NETIF_F_LRO; + DPRINTK(PROBE, INFO, "rx-usecs set to %d, re-enabling RSC\n", + ec->rx_coalesce_usecs); + return true; + } + return false; +} + static int ixgbe_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_q_vector *q_vector; int i; + bool need_reset = false; /* don't accept tx specific changes if we've got mixed RxTx vectors */ if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count @@ -2095,11 +2117,20 @@ static int ixgbe_set_coalesce(struct net_device *netdev, adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq; if (ec->rx_coalesce_usecs > 1) { + u32 max_int; + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) + max_int = IXGBE_MAX_RSC_INT_RATE; + else + max_int = IXGBE_MAX_INT_RATE; + /* check the limits */ - if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) || + if ((1000000/ec->rx_coalesce_usecs > max_int) || (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE)) return -EINVAL; + /* check the old value and enable RSC if necessary */ 
+ need_reset = ixgbe_reenable_rsc(adapter, ec); + /* store the value in ints/second */ adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs; @@ -2108,6 +2139,9 @@ static int ixgbe_set_coalesce(struct net_device *netdev, /* clear the lower bit as its used for dynamic state */ adapter->rx_itr_setting &= ~1; } else if (ec->rx_coalesce_usecs == 1) { + /* check the old value and enable RSC if necessary */ + need_reset = ixgbe_reenable_rsc(adapter, ec); + /* 1 means dynamic mode */ adapter->rx_eitr_param = 20000; adapter->rx_itr_setting = 1; @@ -2116,14 +2150,30 @@ static int ixgbe_set_coalesce(struct net_device *netdev, * any other value means disable eitr, which is best * served by setting the interrupt rate very high */ - if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) - adapter->rx_eitr_param = IXGBE_MAX_RSC_INT_RATE; - else - adapter->rx_eitr_param = IXGBE_MAX_INT_RATE; + adapter->rx_eitr_param = IXGBE_MAX_INT_RATE; adapter->rx_itr_setting = 0; + + /* + * if hardware RSC is enabled, disable it when + * setting low latency mode, to avoid errata, assuming + * that when the user set low latency mode they want + * it at the cost of anything else + */ + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { + adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; + netdev->features &= ~NETIF_F_LRO; + DPRINTK(PROBE, INFO, + "rx-usecs set to 0, disabling RSC\n"); + + need_reset = true; + } } if (ec->tx_coalesce_usecs > 1) { + /* + * don't have to worry about max_int as above because + * tx vectors don't do hardware RSC (an rx function) + */ /* check the limits */ if ((1000000/ec->tx_coalesce_usecs > IXGBE_MAX_INT_RATE) || (1000000/ec->tx_coalesce_usecs < IXGBE_MIN_INT_RATE)) @@ -2167,6 +2217,18 @@ static int ixgbe_set_coalesce(struct net_device *netdev, ixgbe_write_eitr(q_vector); } + /* + * do reset here at the end to make sure EITR==0 case is handled + * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings + * also locks in RSC enable/disable which requires reset + */ + if (need_reset) { + if (netif_running(netdev)) + ixgbe_reinit_locked(adapter); + else + ixgbe_reset(adapter); + } + return 0; } @@ -2178,10 +2240,26 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data) ethtool_op_set_flags(netdev, data); /* if state changes we need to update adapter->flags and reset */ - if ((!!(data & ETH_FLAG_LRO)) != - (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) { - adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED; - need_reset = true; + if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) { + /* + * cast both to bool and verify if they are set the same + * but only enable RSC if itr is non-zero, as + * itr=0 and RSC are mutually exclusive + */ + if (((!!(data & ETH_FLAG_LRO)) != + (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) && + adapter->rx_itr_setting) { + adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED; + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + need_reset = true; + break; + default: + break; + } + } else if (!adapter->rx_itr_setting) { + netdev->features &= ~ETH_FLAG_LRO; + } } /* diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 6c00ee493a3..d1a1868df81 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -175,6 +175,345 @@ static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; } +struct ixgbe_reg_info { + u32 ofs; + char *name; +}; + +static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = { + + /* General Registers */ + {IXGBE_CTRL, "CTRL"}, + 
{IXGBE_STATUS, "STATUS"}, + {IXGBE_CTRL_EXT, "CTRL_EXT"}, + + /* Interrupt Registers */ + {IXGBE_EICR, "EICR"}, + + /* RX Registers */ + {IXGBE_SRRCTL(0), "SRRCTL"}, + {IXGBE_DCA_RXCTRL(0), "DRXCTL"}, + {IXGBE_RDLEN(0), "RDLEN"}, + {IXGBE_RDH(0), "RDH"}, + {IXGBE_RDT(0), "RDT"}, + {IXGBE_RXDCTL(0), "RXDCTL"}, + {IXGBE_RDBAL(0), "RDBAL"}, + {IXGBE_RDBAH(0), "RDBAH"}, + + /* TX Registers */ + {IXGBE_TDBAL(0), "TDBAL"}, + {IXGBE_TDBAH(0), "TDBAH"}, + {IXGBE_TDLEN(0), "TDLEN"}, + {IXGBE_TDH(0), "TDH"}, + {IXGBE_TDT(0), "TDT"}, + {IXGBE_TXDCTL(0), "TXDCTL"}, + + /* List Terminator */ + {} +}; + + +/* + * ixgbe_regdump - register printout routine + */ +static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo) +{ + int i = 0, j = 0; + char rname[16]; + u32 regs[64]; + + switch (reginfo->ofs) { + case IXGBE_SRRCTL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); + break; + case IXGBE_DCA_RXCTRL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); + break; + case IXGBE_RDLEN(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); + break; + case IXGBE_RDH(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); + break; + case IXGBE_RDT(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); + break; + case IXGBE_RXDCTL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + break; + case IXGBE_RDBAL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); + break; + case IXGBE_RDBAH(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); + break; + case IXGBE_TDBAL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); + break; + case IXGBE_TDBAH(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); + break; + case IXGBE_TDLEN(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); + break; + case IXGBE_TDH(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); + break; + case IXGBE_TDT(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); + break; + case IXGBE_TXDCTL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); + break; + default: + printk(KERN_INFO "%-15s %08x\n", reginfo->name, + IXGBE_READ_REG(hw, reginfo->ofs)); + return; + } + + for (i = 0; i < 8; i++) { + snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7); + printk(KERN_ERR "%-15s ", rname); + for (j = 0; j < 8; j++) + printk(KERN_CONT "%08x ", regs[i*8+j]); + printk(KERN_CONT "\n"); + } + +} + +/* + * ixgbe_dump - Print registers, tx-rings and rx-rings + */ +static void ixgbe_dump(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_reg_info *reginfo; + int n = 0; + struct ixgbe_ring *tx_ring; + struct ixgbe_tx_buffer *tx_buffer_info; + union ixgbe_adv_tx_desc *tx_desc; + struct my_u0 { u64 a; u64 b; } *u0; + struct ixgbe_ring *rx_ring; + union ixgbe_adv_rx_desc *rx_desc; + struct ixgbe_rx_buffer *rx_buffer_info; + u32 staterr; + int i = 0; + + if (!netif_msg_hw(adapter)) + return; + + /* Print netdevice Info */ + if (netdev) { + dev_info(&adapter->pdev->dev, "Net device Info\n"); + printk(KERN_INFO "Device Name state " + "trans_start last_rx\n"); + printk(KERN_INFO "%-15s %016lX %016lX %016lX\n", + netdev->name, + netdev->state, + netdev->trans_start, + netdev->last_rx); + } + + /* Print 
Registers */ + dev_info(&adapter->pdev->dev, "Register Dump\n"); + printk(KERN_INFO " Register Name Value\n"); + for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl; + reginfo->name; reginfo++) { + ixgbe_regdump(hw, reginfo); + } + + /* Print TX Ring Summary */ + if (!netdev || !netif_running(netdev)) + goto exit; + + dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); + printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ] " + "leng ntw timestamp\n"); + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + tx_buffer_info = + &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; + printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n", + n, tx_ring->next_to_use, tx_ring->next_to_clean, + (u64)tx_buffer_info->dma, + tx_buffer_info->length, + tx_buffer_info->next_to_watch, + (u64)tx_buffer_info->time_stamp); + } + + /* Print TX Rings */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); + + /* Transmit Descriptor Formats + * + * Advanced Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +--------------------------------------------------------------+ + * 8 | PAYLEN | PORTS | IDX | STA | DCMD |DTYP | RSV | DTALEN | + * +--------------------------------------------------------------+ + * 63 46 45 40 39 36 35 32 31 24 23 20 19 0 + */ + + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + printk(KERN_INFO "------------------------------------\n"); + printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index); + printk(KERN_INFO "------------------------------------\n"); + printk(KERN_INFO "T [desc] [address 63:0 ] " + "[PlPOIdStDDt Ln] [bi->dma ] " + "leng ntw timestamp bi->skb\n"); + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX" + " %04X %3X %016llX %p", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)tx_buffer_info->dma, + tx_buffer_info->length, + tx_buffer_info->next_to_watch, + (u64)tx_buffer_info->time_stamp, + tx_buffer_info->skb); + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + printk(KERN_CONT " NTC/U\n"); + else if (i == tx_ring->next_to_use) + printk(KERN_CONT " NTU\n"); + else if (i == tx_ring->next_to_clean) + printk(KERN_CONT " NTC\n"); + else + printk(KERN_CONT "\n"); + + if (netif_msg_pktdata(adapter) && + tx_buffer_info->dma != 0) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, 1, + phys_to_virt(tx_buffer_info->dma), + tx_buffer_info->length, true); + } + } + + /* Print RX Rings Summary */ +rx_ring_summary: + dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); + printk(KERN_INFO "Queue [NTU] [NTC]\n"); + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + printk(KERN_INFO "%5d %5X %5X\n", n, + rx_ring->next_to_use, rx_ring->next_to_clean); + } + + /* Print RX Rings */ + if (!netif_msg_rx_status(adapter)) + goto exit; + + dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); + + /* Advanced Receive Descriptor (Read) Format + * 63 1 0 + * +-----------------------------------------------------+ + * 0 | Packet Buffer Address [63:1] |A0/NSE| + * +----------------------------------------------+------+ + * 8 | Header Buffer Address [63:1] | DD | + * +-----------------------------------------------------+ + * + * + * Advanced 
Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 30 21 20 16 15 4 3 0 + * +------------------------------------------------------+ + * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | + * | Checksum Ident | | | | Type | Type | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + printk(KERN_INFO "------------------------------------\n"); + printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index); + printk(KERN_INFO "------------------------------------\n"); + printk(KERN_INFO "R [desc] [ PktBuf A0] " + "[ HeadBuf DD] [bi->dma ] [bi->skb] " + "<-- Adv Rx Read format\n"); + printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] " + "[vl er S cks ln] ---------------- [bi->skb] " + "<-- Adv Rx Write-Back format\n"); + + for (i = 0; i < rx_ring->count; i++) { + rx_buffer_info = &rx_ring->rx_buffer_info[i]; + rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); + u0 = (struct my_u0 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + if (staterr & IXGBE_RXD_STAT_DD) { + /* Descriptor Done */ + printk(KERN_INFO "RWB[0x%03X] %016llX " + "%016llX ---------------- %p", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + rx_buffer_info->skb); + } else { + printk(KERN_INFO "R [0x%03X] %016llX " + "%016llX %016llX %p", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)rx_buffer_info->dma, + rx_buffer_info->skb); + + if (netif_msg_pktdata(adapter)) { + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, 1, + phys_to_virt(rx_buffer_info->dma), + rx_ring->rx_buf_len, true); + + if (rx_ring->rx_buf_len + < IXGBE_RXBUFFER_2048) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, 1, + phys_to_virt( + rx_buffer_info->page_dma + + rx_buffer_info->page_offset + ), + PAGE_SIZE/2, true); + } + } + + if (i == rx_ring->next_to_use) + printk(KERN_CONT " NTU\n"); + else if (i == rx_ring->next_to_clean) + printk(KERN_CONT " NTC\n"); + else + printk(KERN_CONT "\n"); + + } + } + +exit: + return; +} + static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter) { u32 ctrl_ext; @@ -266,15 +605,15 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, { if (tx_buffer_info->dma) { if (tx_buffer_info->mapped_as_page) - pci_unmap_page(adapter->pdev, + dma_unmap_page(&adapter->pdev->dev, tx_buffer_info->dma, tx_buffer_info->length, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); else - pci_unmap_single(adapter->pdev, + dma_unmap_single(&adapter->pdev->dev, tx_buffer_info->dma, tx_buffer_info->length, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); tx_buffer_info->dma = 0; } if (tx_buffer_info->skb) { @@ -721,10 +1060,10 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, bi->page_offset ^= (PAGE_SIZE / 2); } - bi->page_dma = pci_map_page(pdev, bi->page, + bi->page_dma = dma_map_page(&pdev->dev, bi->page, bi->page_offset, (PAGE_SIZE / 2), - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); } if (!bi->skb) { @@ -743,9 +1082,9 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, - skb->data)); bi->skb = skb; - bi->dma = pci_map_single(pdev, skb->data, + bi->dma = dma_map_single(&pdev->dev, skb->data, rx_ring->rx_buf_len, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); } /* Refresh the desc even if buffer_addrs didn't change because * each write-back erases this info. 
*/ @@ -886,16 +1225,17 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, */ IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma; else - pci_unmap_single(pdev, rx_buffer_info->dma, + dma_unmap_single(&pdev->dev, + rx_buffer_info->dma, rx_ring->rx_buf_len, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); rx_buffer_info->dma = 0; skb_put(skb, len); } if (upper_len) { - pci_unmap_page(pdev, rx_buffer_info->page_dma, - PAGE_SIZE / 2, PCI_DMA_FROMDEVICE); + dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma, + PAGE_SIZE / 2, DMA_FROM_DEVICE); rx_buffer_info->page_dma = 0; skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, rx_buffer_info->page, @@ -937,9 +1277,10 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count)); if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { if (IXGBE_RSC_CB(skb)->dma) { - pci_unmap_single(pdev, IXGBE_RSC_CB(skb)->dma, + dma_unmap_single(&pdev->dev, + IXGBE_RSC_CB(skb)->dma, rx_ring->rx_buf_len, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); IXGBE_RSC_CB(skb)->dma = 0; } if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) @@ -1190,6 +1531,15 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) itr_reg |= (itr_reg << 16); } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { /* + * 82599 can support a value of zero, so allow it for + * max interrupt rate, but there is an errata where it can + * not be zero with RSC + */ + if (itr_reg == 8 && + !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) + itr_reg = 0; + + /* * set the WDIS bit to not clear the timer bits and cause an * immediate assertion of the interrupt */ @@ -2372,7 +2722,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift)); IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift)); IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); - ixgbe_set_vmolr(hw, adapter->num_vfs); + ixgbe_set_vmolr(hw, adapter->num_vfs, true); } /* Program MRQC for the distribution of queues */ @@ -2482,12 +2832,74 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false); } +/** + * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering + * @adapter: driver data + */ +static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + int i, j; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + vlnctrl &= ~(IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE); + vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + break; + case ixgbe_mac_82599EB: + vlnctrl &= ~IXGBE_VLNCTRL_VFE; + vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + for (i = 0; i < adapter->num_rx_queues; i++) { + j = adapter->rx_ring[i]->reg_idx; + vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); + vlnctrl &= ~IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); + } + break; + default: + break; + } +} + +/** + * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering + * @adapter: driver data + */ +static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + int i, j; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE; + vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + break; + case 
ixgbe_mac_82599EB: + vlnctrl |= IXGBE_VLNCTRL_VFE; + vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + for (i = 0; i < adapter->num_rx_queues; i++) { + j = adapter->rx_ring[i]->reg_idx; + vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); + vlnctrl |= IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); + } + break; + default: + break; + } +} + static void ixgbe_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - u32 ctrl; - int i, j; if (!test_bit(__IXGBE_DOWN, &adapter->state)) ixgbe_irq_disable(adapter); @@ -2498,25 +2910,7 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev, * still receive traffic from a DCB-enabled host even if we're * not in DCB mode. */ - ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); - - /* Disable CFI check */ - ctrl &= ~IXGBE_VLNCTRL_CFIEN; - - /* enable VLAN tag stripping */ - if (adapter->hw.mac.type == ixgbe_mac_82598EB) { - ctrl |= IXGBE_VLNCTRL_VME; - } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { - for (i = 0; i < adapter->num_rx_queues; i++) { - u32 ctrl; - j = adapter->rx_ring[i]->reg_idx; - ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j)); - ctrl |= IXGBE_RXDCTL_VME; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl); - } - } - - IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl); + ixgbe_vlan_filter_enable(adapter); ixgbe_vlan_rx_add_vid(netdev, 0); @@ -2538,21 +2932,6 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) } } -static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq) -{ - struct dev_mc_list *mc_ptr; - u8 *addr = *mc_addr_ptr; - *vmdq = 0; - - mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]); - if (mc_ptr->next) - *mc_addr_ptr = mc_ptr->next->dmi_addr; - else - *mc_addr_ptr = NULL; - - return addr; -} - /** * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set * @netdev: network interface device structure @@ -2566,19 +2945,17 @@ void ixgbe_set_rx_mode(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; - u32 fctrl, vlnctrl; - u8 *addr_list = NULL; - int addr_count = 0; + u32 fctrl; /* Check for Promiscuous and All Multicast modes */ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); - vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); if (netdev->flags & IFF_PROMISC) { hw->addr_ctrl.user_set_promisc = 1; fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); - vlnctrl &= ~IXGBE_VLNCTRL_VFE; + /* don't hardware filter vlans in promisc mode */ + ixgbe_vlan_filter_disable(adapter); } else { if (netdev->flags & IFF_ALLMULTI) { fctrl |= IXGBE_FCTRL_MPE; @@ -2586,22 +2963,18 @@ void ixgbe_set_rx_mode(struct net_device *netdev) } else { fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); } - vlnctrl |= IXGBE_VLNCTRL_VFE; + ixgbe_vlan_filter_enable(adapter); hw->addr_ctrl.user_set_promisc = 0; } IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); /* reprogram secondary unicast list */ hw->mac.ops.update_uc_addr_list(hw, netdev); /* reprogram multicast list */ - addr_count = netdev_mc_count(netdev); - if (addr_count) - addr_list = netdev->mc_list->dmi_addr; - hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count, - ixgbe_addr_list_itr); + hw->mac.ops.update_mc_addr_list(hw, netdev); + if (adapter->num_vfs) ixgbe_restore_vf_multicasts(adapter); } @@ -2661,7 +3034,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) static void 
ixgbe_configure_dcb(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - u32 txdctl, vlnctrl; + u32 txdctl; int i, j; ixgbe_dcb_check_config(&adapter->dcb_cfg); @@ -2679,22 +3052,8 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); } /* Enable VLAN tag insert/strip */ - vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); - if (hw->mac.type == ixgbe_mac_82598EB) { - vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE; - vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); - } else if (hw->mac.type == ixgbe_mac_82599EB) { - vlnctrl |= IXGBE_VLNCTRL_VFE; - vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); - for (i = 0; i < adapter->num_rx_queues; i++) { - j = adapter->rx_ring[i]->reg_idx; - vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); - vlnctrl |= IXGBE_RXDCTL_VME; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); - } - } + ixgbe_vlan_filter_enable(adapter); + hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); } @@ -2927,8 +3286,13 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_tx_queues; i++) { j = adapter->tx_ring[i]->reg_idx; txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); - /* enable WTHRESH=8 descriptors, to encourage burst writeback */ - txdctl |= (8 << 16); + if (adapter->rx_itr_setting == 0) { + /* cannot set wthresh when itr==0 */ + txdctl &= ~0x007F0000; + } else { + /* enable WTHRESH=8 descriptors, to encourage burst writeback */ + txdctl |= (8 << 16); + } IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); } @@ -3131,9 +3495,9 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, rx_buffer_info = &rx_ring->rx_buffer_info[i]; if (rx_buffer_info->dma) { - pci_unmap_single(pdev, rx_buffer_info->dma, + dma_unmap_single(&pdev->dev, rx_buffer_info->dma, rx_ring->rx_buf_len, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); rx_buffer_info->dma = 0; } if (rx_buffer_info->skb) { @@ -3142,9 +3506,10 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, do { struct sk_buff *this = skb; if (IXGBE_RSC_CB(this)->dma) { - pci_unmap_single(pdev, IXGBE_RSC_CB(this)->dma, + dma_unmap_single(&pdev->dev, + IXGBE_RSC_CB(this)->dma, rx_ring->rx_buf_len, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); IXGBE_RSC_CB(this)->dma = 0; } skb = skb->prev; @@ -3154,8 +3519,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, if (!rx_buffer_info->page) continue; if (rx_buffer_info->page_dma) { - pci_unmap_page(pdev, rx_buffer_info->page_dma, - PAGE_SIZE / 2, PCI_DMA_FROMDEVICE); + dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma, + PAGE_SIZE / 2, DMA_FROM_DEVICE); rx_buffer_info->page_dma = 0; } put_page(rx_buffer_info->page); @@ -3268,22 +3633,23 @@ void ixgbe_down(struct ixgbe_adapter *adapter) rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); - netif_tx_disable(netdev); - IXGBE_WRITE_FLUSH(hw); msleep(10); netif_tx_stop_all_queues(netdev); - ixgbe_irq_disable(adapter); - - ixgbe_napi_disable_all(adapter); - clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); del_timer_sync(&adapter->sfp_timer); del_timer_sync(&adapter->watchdog_timer); cancel_work_sync(&adapter->watchdog_task); + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + ixgbe_irq_disable(adapter); + + ixgbe_napi_disable_all(adapter); + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) cancel_work_sync(&adapter->fdir_reinit_task); @@ -3301,8 
+3667,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter) (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & ~IXGBE_DMATXCTL_TE)); - netif_carrier_off(netdev); - /* clear n-tuple filters that are cached */ ethtool_ntuple_flush(netdev); @@ -3379,6 +3743,8 @@ static void ixgbe_reset_task(struct work_struct *work) adapter->tx_timeout_count++; + ixgbe_dump(adapter); + netdev_err(adapter->netdev, "Reset adapter\n"); ixgbe_reinit_locked(adapter); } @@ -3479,12 +3845,12 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) adapter->num_tx_queues = 1; #ifdef CONFIG_IXGBE_DCB if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - DPRINTK(PROBE, INFO, "FCoE enabled with DCB \n"); + DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n"); ixgbe_set_dcb_queues(adapter); } #endif if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { - DPRINTK(PROBE, INFO, "FCoE enabled with RSS \n"); + DPRINTK(PROBE, INFO, "FCoE enabled with RSS\n"); if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) ixgbe_set_fdir_queues(adapter); @@ -4381,8 +4747,8 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); tx_ring->size = ALIGN(tx_ring->size, 4096); - tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, - &tx_ring->dma); + tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); if (!tx_ring->desc) goto err; @@ -4452,7 +4818,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096); - rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma); + rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); if (!rx_ring->desc) { DPRINTK(PROBE, ERR, @@ -4513,7 +4880,8 @@ void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter, vfree(tx_ring->tx_buffer_info); tx_ring->tx_buffer_info = NULL; - pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); tx_ring->desc = NULL; } @@ -4550,7 +4918,8 @@ void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter, vfree(rx_ring->rx_buffer_info); rx_ring->rx_buffer_info = NULL; - pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); rx_ring->desc = NULL; } @@ -5100,7 +5469,7 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work) &(adapter->tx_ring[i]->reinit_state)); } else { DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, " - "ignored adding FDIR ATR filters \n"); + "ignored adding FDIR ATR filters\n"); } /* Done FDIR Re-initialization, enable transmits */ netif_tx_start_all_queues(adapter->netdev); @@ -5420,10 +5789,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, tx_buffer_info->length = size; tx_buffer_info->mapped_as_page = false; - tx_buffer_info->dma = pci_map_single(pdev, + tx_buffer_info->dma = dma_map_single(&pdev->dev, skb->data + offset, - size, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(pdev, tx_buffer_info->dma)) + size, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) goto dma_error; tx_buffer_info->time_stamp = jiffies; tx_buffer_info->next_to_watch = i; @@ -5456,12 +5825,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); tx_buffer_info->length = size; 
- tx_buffer_info->dma = pci_map_page(adapter->pdev, + tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev, frag->page, offset, size, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); tx_buffer_info->mapped_as_page = true; - if (pci_dma_mapping_error(pdev, tx_buffer_info->dma)) + if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) goto dma_error; tx_buffer_info->time_stamp = jiffies; tx_buffer_info->next_to_watch = i; @@ -5942,6 +6311,10 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, .ndo_do_ioctl = ixgbe_ioctl, + .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac, + .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, + .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw, + .ndo_get_vf_config = ixgbe_ndo_get_vf_config, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ixgbe_netpoll, #endif @@ -6039,13 +6412,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, if (err) return err; - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && - !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && + !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { pci_using_dac = 1; } else { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_coherent_mask(&pdev->dev, + DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "No usable DMA " "configuration, aborting\n"); diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c index 1c1efd38695..d6d5b843d62 100644 --- a/drivers/net/ixgbe/ixgbe_phy.c +++ b/drivers/net/ixgbe/ixgbe_phy.c @@ -475,7 +475,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) msleep(edata); break; case IXGBE_DATA_NL: - hw_dbg(hw, "DATA: \n"); + hw_dbg(hw, "DATA:\n"); data_offset++; hw->eeprom.ops.read(hw, data_offset++, &phy_offset); @@ -491,7 +491,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) break; case IXGBE_CONTROL_NL: data_offset++; - hw_dbg(hw, "CONTROL: \n"); + hw_dbg(hw, "CONTROL:\n"); if (edata == IXGBE_CONTROL_EOL_NL) { hw_dbg(hw, "EOL\n"); end_data = true; diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c index d4cd20f3019..f6cee94ec8e 100644 --- a/drivers/net/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ixgbe/ixgbe_sriov.c @@ -48,7 +48,11 @@ int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, int entries, u16 *hash_list, u32 vf) { struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + struct ixgbe_hw *hw = &adapter->hw; int i; + u32 vector_bit; + u32 vector_reg; + u32 mta_reg; /* only so many hash values supported */ entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES); @@ -68,8 +72,13 @@ int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, vfinfo->vf_mc_hashes[i] = hash_list[i];; } - /* Flush and reset the mta with the new values */ - ixgbe_set_rx_mode(adapter->netdev); + for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { + vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F; + vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F; + mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); + mta_reg |= (1 << vector_bit); + IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); + } return 0; } @@ -98,38 +107,51 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf) { - u32 ctrl; - - /* Check if global VLAN already set, if not set it */ - ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); - 
if (!(ctrl & IXGBE_VLNCTRL_VFE)) { - /* enable VLAN tag insert/strip */ - ctrl |= IXGBE_VLNCTRL_VFE; - ctrl &= ~IXGBE_VLNCTRL_CFIEN; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl); - } - return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); } -void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf) +void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) { u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); - vmolr |= (IXGBE_VMOLR_AUPE | - IXGBE_VMOLR_ROMPE | + vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE | IXGBE_VMOLR_BAM); + if (aupe) + vmolr |= IXGBE_VMOLR_AUPE; + else + vmolr &= ~IXGBE_VMOLR_AUPE; IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); } +static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + + if (vid) + IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), + (vid | IXGBE_VMVIR_VLANA_DEFAULT)); + else + IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0); +} + inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; /* reset offloads to defaults */ - ixgbe_set_vmolr(hw, vf); - + if (adapter->vfinfo[vf].pf_vlan) { + ixgbe_set_vf_vlan(adapter, true, + adapter->vfinfo[vf].pf_vlan, vf); + ixgbe_set_vmvir(adapter, + (adapter->vfinfo[vf].pf_vlan | + (adapter->vfinfo[vf].pf_qos << + VLAN_PRIO_SHIFT)), vf); + ixgbe_set_vmolr(hw, vf, false); + } else { + ixgbe_set_vmvir(adapter, 0, vf); + ixgbe_set_vmolr(hw, vf, true); + } /* reset multicast table array for vf */ adapter->vfinfo[vf].num_vf_mc_hashes = 0; @@ -263,10 +285,12 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) case IXGBE_VF_SET_MAC_ADDR: { u8 *new_mac = ((u8 *)(&msgbuf[1])); - if (is_valid_ether_addr(new_mac)) + if (is_valid_ether_addr(new_mac) && + !adapter->vfinfo[vf].pf_set_mac) ixgbe_set_vf_mac(adapter, vf, new_mac); else - retval = -1; + ixgbe_set_vf_mac(adapter, + vf, adapter->vfinfo[vf].vf_mac_addresses); } break; case IXGBE_VF_SET_MULTICAST: @@ -360,3 +384,76 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter) } } +int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs)) + return -EINVAL; + adapter->vfinfo[vf].pf_set_mac = true; + dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); + dev_info(&adapter->pdev->dev, "Reload the VF driver to make this" + " change effective."); + if (test_bit(__IXGBE_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, "The VF MAC address has been set," + " but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, "Bring the PF device up before" + " attempting to use the VF device.\n"); + } + return ixgbe_set_vf_mac(adapter, vf, mac); +} + +int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) +{ + int err = 0; + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7)) + return -EINVAL; + if (vlan || qos) { + err = ixgbe_set_vf_vlan(adapter, true, vlan, vf); + if (err) + goto out; + ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); + ixgbe_set_vmolr(&adapter->hw, vf, false); + adapter->vfinfo[vf].pf_vlan = vlan; + adapter->vfinfo[vf].pf_qos = qos; + dev_info(&adapter->pdev->dev, + "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); + if (test_bit(__IXGBE_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF VLAN has been set," + " but the PF device is not up.\n"); + 
dev_warn(&adapter->pdev->dev, + "Bring the PF device up before" + " attempting to use the VF device.\n"); + } + } else { + err = ixgbe_set_vf_vlan(adapter, false, + adapter->vfinfo[vf].pf_vlan, vf); + ixgbe_set_vmvir(adapter, vlan, vf); + ixgbe_set_vmolr(&adapter->hw, vf, true); + adapter->vfinfo[vf].pf_vlan = 0; + adapter->vfinfo[vf].pf_qos = 0; + } +out: + return err; +} + +int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) +{ + return -EOPNOTSUPP; +} + +int ixgbe_ndo_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivi) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + if (vf >= adapter->num_vfs) + return -EINVAL; + ivi->vf = vf; + memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); + ivi->tx_rate = 0; + ivi->vlan = adapter->vfinfo[vf].pf_vlan; + ivi->qos = adapter->vfinfo[vf].pf_qos; + return 0; +} diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h index 51d1106c45a..184730ecdfb 100644 --- a/drivers/net/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ixgbe/ixgbe_sriov.h @@ -32,7 +32,7 @@ int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, int entries, u16 *hash_list, u32 vf); void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter); int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf); -void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf); +void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe); void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf); void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf); void ixgbe_msg_task(struct ixgbe_adapter *adapter); @@ -42,6 +42,12 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter); void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter); void ixgbe_dump_registers(struct ixgbe_adapter *adapter); +int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac); +int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, + u8 qos); +int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); +int ixgbe_ndo_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivi); #endif /* _IXGBE_SRIOV_H_ */ diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 534affcc38c..4277cbbb812 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -219,6 +219,7 @@ #define IXGBE_MTQC 0x08120 #define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */ #define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */ +#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */ #define IXGBE_VT_CTL 0x051B0 #define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4)) #define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4)) @@ -1311,6 +1312,10 @@ #define IXGBE_VLVF_ENTRIES 64 #define IXGBE_VLVF_VLANID_MASK 0x00000FFF +/* Per VF Port VLAN insertion rules */ +#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ +#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ + #define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ /* STATUS Bit Masks */ @@ -2419,8 +2424,7 @@ struct ixgbe_mac_operations { s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); s32 (*init_rx_addrs)(struct ixgbe_hw *); s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *); - s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32, - ixgbe_mc_addr_itr); + s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct 
net_device *); s32 (*enable_mc)(struct ixgbe_hw *); s32 (*disable_mc)(struct ixgbe_hw *); s32 (*clear_vfta)(struct ixgbe_hw *); |
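
Two interface changes in this patch are easy to miss in the noise. First, update_mc_addr_list() now takes the struct net_device directly and walks its multicast list with netdev_for_each_mc_addr(), retiring the caller-supplied ixgbe_mc_addr_itr iterator and the open-coded dev_mc_list walk. A minimal sketch of the new iteration style (the helper name is illustrative):

#include <linux/netdevice.h>

/*
 * Hypothetical sketch: hardware-setup code reads the multicast list
 * straight off the net_device instead of receiving an address array
 * plus an iterator callback.
 */
static void example_program_mta(struct net_device *netdev)
{
        struct netdev_hw_addr *ha;

        netdev_for_each_mc_addr(ha, netdev) {
                /* hash ha->addr into the multicast table array,
                 * as ixgbe_set_mta() does above */
        }
}

Second, the new ndo_set_vf_mac/ndo_set_vf_vlan/ndo_get_vf_config hooks expose PF-side VF administration through rtnetlink; the pf_set_mac and pf_vlan/pf_qos fields added to struct vf_data_storage record that an administrator has pinned the setting so the guest can no longer override it. A sketch of how a caller exercises the query hook, again with a hypothetical wrapper name:

#include <linux/netdevice.h>
#include <linux/if_link.h>
#include <linux/errno.h>

/*
 * Hypothetical wrapper mirroring what the rtnetlink core does when
 * userspace requests VF state: check that the driver implements the
 * hook, then dispatch to ndo_get_vf_config (implemented for ixgbe in
 * ixgbe_ndo_get_vf_config() above).
 */
static int example_query_vf(struct net_device *dev, int vf,
                            struct ifla_vf_info *ivi)
{
        const struct net_device_ops *ops = dev->netdev_ops;

        if (!ops->ndo_get_vf_config)
                return -EOPNOTSUPP;
        return ops->ndo_get_vf_config(dev, vf, ivi);
}

From userspace these hooks back iproute2 commands of the form "ip link set dev eth0 vf 0 mac 00:11:22:33:44:55" and "ip link set dev eth0 vf 0 vlan 100 qos 3" (exact syntax depends on the iproute2 version).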