Diffstat (limited to 'drivers/net/igb/igb_main.c')
-rw-r--r--    drivers/net/igb/igb_main.c    269
1 file changed, 173 insertions(+), 96 deletions(-)
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index adb09d32625..d2639c4a086 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -65,6 +65,7 @@ static struct pci_device_id igb_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
@@ -93,13 +94,15 @@ static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
-static void igb_set_multi(struct net_device *);
+static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
-static int igb_xmit_frame_ring_adv(struct sk_buff *, struct net_device *,
- struct igb_ring *);
-static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
+static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *,
+ struct net_device *,
+ struct igb_ring *);
+static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
+ struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
@@ -127,7 +130,7 @@ static void igb_restore_vlan(struct igb_adapter *);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static int igb_rcv_msg_from_vf(struct igb_adapter *, u32);
-static void igb_set_mc_list_pools(struct igb_adapter *, int, u16);
+static inline void igb_set_rah_pool(struct e1000_hw *, int, int);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
@@ -151,6 +154,12 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
struct e1000_hw *hw = &adapter->hw;
u32 vmolr;
+ /* if it isn't the PF, check to see if VFs are enabled and
+ * increase the size to support vlan tags */
+ if (vfn < adapter->vfs_allocated_count &&
+ adapter->vf_data[vfn].vlans_enabled)
+ size += VLAN_TAG_SIZE;
+
vmolr = rd32(E1000_VMOLR(vfn));
vmolr &= ~E1000_VMOLR_RLPML_MASK;
vmolr |= size | E1000_VMOLR_LPE;
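
For context on this hunk: VMOLR's RLPML field is the per-pool rx maximum packet length, so once a VF has VLAN filtering enabled the limit has to grow by the 4-byte 802.1Q tag, or tagged frames at the old maximum would be dropped. A minimal sketch of the read-modify-write pattern, assuming the driver's rd32/wr32 accessors; set_pool_rlpml is a hypothetical name:

	/* Sketch: program a pool's rx packet-length limit while preserving
	 * the other VMOLR bits; VLAN_TAG_SIZE (4) covers the 802.1Q tag. */
	static void set_pool_rlpml(struct e1000_hw *hw, int pool, u32 size,
				   bool vlans_enabled)
	{
		u32 vmolr = rd32(E1000_VMOLR(pool));

		if (vlans_enabled)
			size += VLAN_TAG_SIZE;
		vmolr &= ~E1000_VMOLR_RLPML_MASK;	/* clear old length */
		vmolr |= size | E1000_VMOLR_LPE;	/* new length + long-packet enable */
		wr32(E1000_VMOLR(pool), vmolr);
	}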
@@ -818,9 +827,11 @@ static void igb_irq_disable(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
if (adapter->msix_entries) {
- wr32(E1000_EIAM, 0);
- wr32(E1000_EIMC, ~0);
- wr32(E1000_EIAC, 0);
+ u32 regval = rd32(E1000_EIAM);
+ wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
+ wr32(E1000_EIMC, adapter->eims_enable_mask);
+ regval = rd32(E1000_EIAC);
+ wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
}
wr32(E1000_IAM, 0);
@@ -838,8 +849,10 @@ static void igb_irq_enable(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
if (adapter->msix_entries) {
- wr32(E1000_EIAC, adapter->eims_enable_mask);
- wr32(E1000_EIAM, adapter->eims_enable_mask);
+ u32 regval = rd32(E1000_EIAC);
+ wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
+ regval = rd32(E1000_EIAM);
+ wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
wr32(E1000_EIMS, adapter->eims_enable_mask);
if (adapter->vfs_allocated_count)
wr32(E1000_MBVFIMR, 0xFF);
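
The two interrupt hunks above drop the blanket EIAM/EIMC/EIAC writes in favor of read-modify-write updates scoped to eims_enable_mask, so bits owned by other users (the VF mailbox vector, for instance) are left alone. The pattern, condensed into a sketch around a hypothetical helper:

	/* Sketch: touch only this adapter's bits in the auto-mask (EIAM)
	 * and auto-clear (EIAC) registers, then set/clear via EIMS/EIMC. */
	static void igb_eims_update(struct e1000_hw *hw, u32 mask, bool enable)
	{
		u32 regval = rd32(E1000_EIAC);

		wr32(E1000_EIAC, enable ? (regval | mask) : (regval & ~mask));
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, enable ? (regval | mask) : (regval & ~mask));
		wr32(enable ? E1000_EIMS : E1000_EIMC, mask);
	}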
@@ -925,7 +938,7 @@ static void igb_configure(struct igb_adapter *adapter)
int i;
igb_get_hw_control(adapter);
- igb_set_multi(netdev);
+ igb_set_rx_mode(netdev);
igb_restore_vlan(adapter);
@@ -1129,7 +1142,7 @@ void igb_reset(struct igb_adapter *adapter)
}
fc->pause_time = 0xFFFF;
fc->send_xon = 1;
- fc->type = fc->original_type;
+ fc->current_mode = fc->requested_mode;
/* disable receive for all VFs and wait one second */
if (adapter->vfs_allocated_count) {
@@ -1166,7 +1179,8 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_stop = igb_close,
.ndo_start_xmit = igb_xmit_frame_adv,
.ndo_get_stats = igb_get_stats,
- .ndo_set_multicast_list = igb_set_multi,
+ .ndo_set_rx_mode = igb_set_rx_mode,
+ .ndo_set_multicast_list = igb_set_rx_mode,
.ndo_set_mac_address = igb_set_mac,
.ndo_change_mtu = igb_change_mtu,
.ndo_do_ioctl = igb_ioctl,
@@ -1379,6 +1393,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
netdev->vlan_features |= NETIF_F_TSO;
netdev->vlan_features |= NETIF_F_TSO6;
netdev->vlan_features |= NETIF_F_IP_CSUM;
+ netdev->vlan_features |= NETIF_F_IPV6_CSUM;
netdev->vlan_features |= NETIF_F_SG;
if (pci_using_dac)
@@ -1426,8 +1441,8 @@ static int __devinit igb_probe(struct pci_dev *pdev,
hw->mac.autoneg = true;
hw->phy.autoneg_advertised = 0x2f;
- hw->fc.original_type = e1000_fc_default;
- hw->fc.type = e1000_fc_default;
+ hw->fc.requested_mode = e1000_fc_default;
+ hw->fc.current_mode = e1000_fc_default;
adapter->itr_setting = IGB_DEFAULT_ITR;
adapter->itr = IGB_START_ITR;
@@ -2515,75 +2530,94 @@ static int igb_set_mac(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
- hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
-
+ igb_rar_set(hw, hw->mac.addr, 0);
igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
return 0;
}
/**
- * igb_set_multi - Multicast and Promiscuous mode set
+ * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
- * The set_multi entry point is called whenever the multicast address
- * list or the network interface flags are updated. This routine is
- * responsible for configuring the hardware for proper multicast,
+ * The set_rx_mode entry point is called whenever the unicast or multicast
+ * address lists or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
* promiscuous mode, and all-multi behavior.
**/
-static void igb_set_multi(struct net_device *netdev)
+static void igb_set_rx_mode(struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct e1000_mac_info *mac = &hw->mac;
- struct dev_mc_list *mc_ptr;
+ unsigned int rar_entries = hw->mac.rar_entry_count -
+ (adapter->vfs_allocated_count + 1);
+ struct dev_mc_list *mc_ptr = netdev->mc_list;
u8 *mta_list = NULL;
u32 rctl;
int i;
/* Check for Promiscuous and All Multicast modes */
-
rctl = rd32(E1000_RCTL);
if (netdev->flags & IFF_PROMISC) {
rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
rctl &= ~E1000_RCTL_VFE;
} else {
- if (netdev->flags & IFF_ALLMULTI) {
+ if (netdev->flags & IFF_ALLMULTI)
rctl |= E1000_RCTL_MPE;
+ else
+ rctl &= ~E1000_RCTL_MPE;
+
+ if (netdev->uc.count > rar_entries)
+ rctl |= E1000_RCTL_UPE;
+ else
rctl &= ~E1000_RCTL_UPE;
- } else
- rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
rctl |= E1000_RCTL_VFE;
}
wr32(E1000_RCTL, rctl);
- if (netdev->mc_count) {
- mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
- if (!mta_list) {
- dev_err(&adapter->pdev->dev,
- "failed to allocate multicast filter list\n");
- return;
+ if (netdev->uc.count && rar_entries) {
+ struct netdev_hw_addr *ha;
+ list_for_each_entry(ha, &netdev->uc.list, list) {
+ if (!rar_entries)
+ break;
+ igb_rar_set(hw, ha->addr, rar_entries);
+ igb_set_rah_pool(hw, adapter->vfs_allocated_count,
+ rar_entries);
+ rar_entries--;
}
}
+ /* write the addresses in reverse order to avoid write combining */
+ for (; rar_entries > 0 ; rar_entries--) {
+ wr32(E1000_RAH(rar_entries), 0);
+ wr32(E1000_RAL(rar_entries), 0);
+ }
+ wrfl();
- /* The shared function expects a packed array of only addresses. */
- mc_ptr = netdev->mc_list;
+ if (!netdev->mc_count) {
+ /* nothing to program, so clear mc list */
+ igb_update_mc_addr_list(hw, NULL, 0);
+ igb_restore_vf_multicasts(adapter);
+ return;
+ }
+
+ mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
+ if (!mta_list) {
+ dev_err(&adapter->pdev->dev,
+ "failed to allocate multicast filter list\n");
+ return;
+ }
+ /* The shared function expects a packed array of only addresses. */
for (i = 0; i < netdev->mc_count; i++) {
if (!mc_ptr)
break;
memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
mc_ptr = mc_ptr->next;
}
- igb_update_mc_addr_list(hw, mta_list, i,
- adapter->vfs_allocated_count + 1,
- mac->rar_entry_count);
-
- igb_set_mc_list_pools(adapter, i, mac->rar_entry_count);
- igb_restore_vf_multicasts(adapter);
-
+ igb_update_mc_addr_list(hw, mta_list, i);
kfree(mta_list);
+ igb_restore_vf_multicasts(adapter);
}
/* Need to wait a few seconds after link up to get diagnostic information from
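
Taken as a whole, the new igb_set_rx_mode fills receive-address registers (RARs) for secondary unicast addresses from the top index downward, reserving the low entries for the PF and VF MACs, falls back to unicast promiscuous (UPE) when the list outgrows the table, and zeroes the leftovers in reverse before flushing. The allocation logic, condensed from the hunk above into a sketch (rctl handling abbreviated):

	unsigned int rar_entries = hw->mac.rar_entry_count -
				   (adapter->vfs_allocated_count + 1);
	struct netdev_hw_addr *ha;

	/* program secondary unicast addresses top-down while entries last */
	list_for_each_entry(ha, &netdev->uc.list, list) {
		if (!rar_entries)
			break;
		igb_rar_set(hw, ha->addr, rar_entries);
		igb_set_rah_pool(hw, adapter->vfs_allocated_count, rar_entries);
		rar_entries--;
	}
	/* clear what's left, highest index first, then flush posted writes */
	for (; rar_entries > 0; rar_entries--) {
		wr32(E1000_RAH(rar_entries), 0);
		wr32(E1000_RAL(rar_entries), 0);
	}
	wrfl();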
@@ -2618,10 +2652,6 @@ static bool igb_has_link(struct igb_adapter *adapter)
link_active = true;
}
break;
- case e1000_media_type_fiber:
- ret_val = hw->mac.ops.check_for_link(hw);
- link_active = !!(rd32(E1000_STATUS) & E1000_STATUS_LU);
- break;
case e1000_media_type_internal_serdes:
ret_val = hw->mac.ops.check_for_link(hw);
link_active = hw->mac.serdes_has_link;
@@ -3298,9 +3328,9 @@ static int igb_maybe_stop_tx(struct net_device *netdev,
return __igb_maybe_stop_tx(netdev, tx_ring, size);
}
-static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
- struct net_device *netdev,
- struct igb_ring *tx_ring)
+static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
+ struct net_device *netdev,
+ struct igb_ring *tx_ring)
{
struct igb_adapter *adapter = netdev_priv(netdev);
unsigned int first;
@@ -3388,7 +3418,8 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
+ struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct igb_ring *tx_ring;
@@ -3401,7 +3432,7 @@ static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev)
* to a flow. Right now, performance is impacted slightly negatively
* if using multiple tx queues. If the stack breaks away from a
* single qdisc implementation, we can look at this again. */
- return (igb_xmit_frame_ring_adv(skb, netdev, tx_ring));
+ return igb_xmit_frame_ring_adv(skb, netdev, tx_ring);
}
/**
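
The prototype hunks above are part of the tree-wide switch of ndo_start_xmit implementations from int to netdev_tx_t, which documents that only the NETDEV_TX_* codes are legal returns. The resulting shape of a transmit handler, as a sketch with a hypothetical ring-full check:

	static netdev_tx_t my_xmit_frame(struct sk_buff *skb,
					 struct net_device *netdev)
	{
		if (tx_ring_full(netdev))	/* hypothetical helper */
			return NETDEV_TX_BUSY;	/* stack will requeue the skb */

		/* ... map buffers, post descriptors, kick the tail ... */
		return NETDEV_TX_OK;		/* skb is consumed */
	}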
@@ -3938,7 +3969,7 @@ static int igb_set_vf_multicasts(struct igb_adapter *adapter,
vf_data->vf_mc_hashes[i] = hash_list[i];
/* Flush and reset the mta with the new values */
- igb_set_multi(adapter->netdev);
+ igb_set_rx_mode(adapter->netdev);
return 0;
}
@@ -3981,6 +4012,8 @@ static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
wr32(E1000_VLVF(i), reg);
}
+
+ adapter->vf_data[vf].vlans_enabled = 0;
}
static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
@@ -4029,6 +4062,22 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
reg |= vid;
wr32(E1000_VLVF(i), reg);
+
+ /* do not modify RLPML for PF devices */
+ if (vf >= adapter->vfs_allocated_count)
+ return 0;
+
+ if (!adapter->vf_data[vf].vlans_enabled) {
+ u32 size;
+ reg = rd32(E1000_VMOLR(vf));
+ size = reg & E1000_VMOLR_RLPML_MASK;
+ size += 4;
+ reg &= ~E1000_VMOLR_RLPML_MASK;
+ reg |= size;
+ wr32(E1000_VMOLR(vf), reg);
+ }
+ adapter->vf_data[vf].vlans_enabled++;
+
return 0;
}
} else {
@@ -4041,6 +4090,21 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
igb_vfta_set(hw, vid, false);
}
wr32(E1000_VLVF(i), reg);
+
+ /* do not modify RLPML for PF devices */
+ if (vf >= adapter->vfs_allocated_count)
+ return 0;
+
+ adapter->vf_data[vf].vlans_enabled--;
+ if (!adapter->vf_data[vf].vlans_enabled) {
+ u32 size;
+ reg = rd32(E1000_VMOLR(vf));
+ size = reg & E1000_VMOLR_RLPML_MASK;
+ size -= 4;
+ reg &= ~E1000_VMOLR_RLPML_MASK;
+ reg |= size;
+ wr32(E1000_VMOLR(vf), reg);
+ }
return 0;
}
}
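
The add and remove paths in igb_vlvf_set mirror each other: the first VLAN filter enabled for a VF widens that pool's RLPML by one tag, the last one removed shrinks it back, with vlans_enabled serving as a reference count. The grow side reduced to its essentials (the literal 4 in the patch is the VLAN tag size):

	/* Sketch: on the VF's first VLAN, make room for the 802.1Q tag */
	if (!adapter->vf_data[vf].vlans_enabled) {
		u32 reg = rd32(E1000_VMOLR(vf));
		u32 size = (reg & E1000_VMOLR_RLPML_MASK) + 4;

		reg &= ~E1000_VMOLR_RLPML_MASK;
		wr32(E1000_VMOLR(vf), reg | size);
	}
	adapter->vf_data[vf].vlans_enabled++;	/* refcount of active filters */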
@@ -4072,13 +4136,14 @@ static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
adapter->vf_data[vf].num_vf_mc_hashes = 0;
/* Flush and reset the mta with the new values */
- igb_set_multi(adapter->netdev);
+ igb_set_rx_mode(adapter->netdev);
}
static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
struct e1000_hw *hw = &adapter->hw;
unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
+ int rar_entry = hw->mac.rar_entry_count - (vf + 1);
u32 reg, msgbuf[3];
u8 *addr = (u8 *)(&msgbuf[1]);
@@ -4086,8 +4151,8 @@ static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
igb_vf_reset_event(adapter, vf);
/* set vf mac address */
- igb_rar_set(hw, vf_mac, vf + 1);
- igb_set_rah_pool(hw, vf, vf + 1);
+ igb_rar_set(hw, vf_mac, rar_entry);
+ igb_set_rah_pool(hw, vf, rar_entry);
/* enable transmit and receive for vf */
reg = rd32(E1000_VFTE);
@@ -4542,6 +4607,20 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
adapter->hw_csum_good++;
}
+static inline u16 igb_get_hlen(struct igb_adapter *adapter,
+ union e1000_adv_rx_desc *rx_desc)
+{
+ /* HW will not DMA in data larger than the given buffer, even if it
+ * parses the (NFS, of course) header to be larger. In that case, it
+ * fills the header buffer and spills the rest into the page.
+ */
+ u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
+ E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
+ if (hlen > adapter->rx_ps_hdr_size)
+ hlen = adapter->rx_ps_hdr_size;
+ return hlen;
+}
+
static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
int *work_done, int budget)
{
@@ -4556,7 +4635,8 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
int cleaned_count = 0;
unsigned int total_bytes = 0, total_packets = 0;
unsigned int i;
- u32 length, hlen, staterr;
+ u32 staterr;
+ u16 length;
i = rx_ring->next_to_clean;
buffer_info = &rx_ring->buffer_info[i];
@@ -4593,17 +4673,8 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
goto send_up;
}
- /* HW will not DMA in data larger than the given buffer, even
- * if it parses the (NFS, of course) header to be larger. In
- * that case, it fills the header buffer and spills the rest
- * into the page.
- */
- hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
- E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
- if (hlen > adapter->rx_ps_hdr_size)
- hlen = adapter->rx_ps_hdr_size;
-
- if (!skb_shinfo(skb)->nr_frags) {
+ if (buffer_info->dma) {
+ u16 hlen = igb_get_hlen(adapter, rx_desc);
pci_unmap_single(pdev, buffer_info->dma,
adapter->rx_ps_hdr_size,
PCI_DMA_FROMDEVICE);
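
With igb_get_hlen factored out, the rx clean loop keys off buffer_info->dma (is there a mapped header buffer?) instead of nr_frags, and the header length comes straight from the descriptor write-back. The field extraction, restated as a sketch:

	/* Sketch: header length as reported in an advanced rx descriptor,
	 * clamped because HW never DMAs past the posted header buffer. */
	u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info);
	u16 hlen = (hdr_info & E1000_RXDADV_HDRBUFLEN_MASK) >>
		   E1000_RXDADV_HDRBUFLEN_SHIFT;

	if (hlen > adapter->rx_ps_hdr_size)
		hlen = adapter->rx_ps_hdr_size;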
@@ -4843,8 +4914,6 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
data->phy_id = adapter->hw.phy.addr;
break;
case SIOCGMIIREG:
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
&data->val_out))
return -EIO;
@@ -5033,6 +5102,34 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
}
}
+s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+ struct igb_adapter *adapter = hw->back;
+ u16 cap_offset;
+
+ cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+ if (!cap_offset)
+ return -E1000_ERR_CONFIG;
+
+ pci_read_config_word(adapter->pdev, cap_offset + reg, value);
+
+ return 0;
+}
+
+s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+ struct igb_adapter *adapter = hw->back;
+ u16 cap_offset;
+
+ cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+ if (!cap_offset)
+ return -E1000_ERR_CONFIG;
+
+ pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
+
+ return 0;
+}
+
static void igb_vlan_rx_register(struct net_device *netdev,
struct vlan_group *grp)
{
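
The new igb_read_pcie_cap_reg/igb_write_pcie_cap_reg locate the PCI Express capability on each call and address registers relative to it, so shared MAC code can reach config space without carrying the offset around. Hypothetical usage, assuming PCI_EXP_DEVCTL and PCI_EXP_DEVCTL_EXT_TAG from <linux/pci_regs.h>:

	/* Sketch: clear extended-tag enable in the PCIe Device Control reg */
	u16 devctl;

	if (igb_read_pcie_cap_reg(hw, PCI_EXP_DEVCTL, &devctl))
		return -E1000_ERR_CONFIG;	/* no PCIe capability */
	devctl &= ~PCI_EXP_DEVCTL_EXT_TAG;
	igb_write_pcie_cap_reg(hw, PCI_EXP_DEVCTL, &devctl);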
@@ -5136,14 +5233,6 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
mac->autoneg = 0;
- /* Fiber NICs only allow 1000 gbps Full duplex */
- if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
- spddplx != (SPEED_1000 + DUPLEX_FULL)) {
- dev_err(&adapter->pdev->dev,
- "Unsupported Speed/Duplex configuration\n");
- return -EINVAL;
- }
-
switch (spddplx) {
case SPEED_10 + DUPLEX_HALF:
mac->forced_speed_duplex = ADVERTISE_10_HALF;
@@ -5202,7 +5291,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
if (wufc) {
igb_setup_rctl(adapter);
- igb_set_multi(netdev);
+ igb_set_rx_mode(netdev);
/* turn on all-multi mode if wake on multicast is enabled */
if (wufc & E1000_WUFC_MC) {
@@ -5231,7 +5320,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
*enable_wake = wufc || adapter->en_mng_pt;
if (!*enable_wake)
- igb_shutdown_fiber_serdes_link_82575(hw);
+ igb_shutdown_serdes_link_82575(hw);
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant. */
@@ -5452,29 +5541,17 @@ static void igb_io_resume(struct pci_dev *pdev)
igb_get_hw_control(adapter);
}
-static void igb_set_mc_list_pools(struct igb_adapter *adapter,
- int entry_count, u16 total_rar_filters)
-{
- struct e1000_hw *hw = &adapter->hw;
- int i = adapter->vfs_allocated_count + 1;
-
- if ((i + entry_count) < total_rar_filters)
- total_rar_filters = i + entry_count;
-
- for (; i < total_rar_filters; i++)
- igb_set_rah_pool(hw, adapter->vfs_allocated_count, i);
-}
-
static int igb_set_vf_mac(struct igb_adapter *adapter,
int vf, unsigned char *mac_addr)
{
struct e1000_hw *hw = &adapter->hw;
- int rar_entry = vf + 1; /* VF MAC addresses start at entry 1 */
-
- igb_rar_set(hw, mac_addr, rar_entry);
+ /* VF MAC addresses start at the end of the receive addresses and move
+ * towards the first; as a result a collision should not be possible */
+ int rar_entry = hw->mac.rar_entry_count - (vf + 1);
memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
+ igb_rar_set(hw, mac_addr, rar_entry);
igb_set_rah_pool(hw, vf, rar_entry);
return 0;
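
With this change VF MACs are carved from the top of the RAR table while igb_set_rx_mode fills secondary unicast entries downward from just below them, so the two allocators can never collide with each other or with the PF MAC in entry 0. As a worked example, assuming rar_entry_count = 24 and 7 allocated VFs:

	/* RAR[0]      PF MAC (igb_set_mac)
	 * RAR[1..16]  secondary unicast, filled downward from 16
	 * RAR[17..23] VF MACs: vf 6 -> 24 - (6 + 1) = 17, ... vf 0 -> 23 */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);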