Diffstat (limited to 'drivers/net/ethernet/intel/e1000/e1000_main.c')
 drivers/net/ethernet/intel/e1000/e1000_main.c | 135 ++++++++++---------------
 1 file changed, 54 insertions(+), 81 deletions(-)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 8502c625dbe..660971f304b 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -166,8 +166,10 @@ static void e1000_vlan_mode(struct net_device *netdev,
netdev_features_t features);
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
bool filter_on);
-static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
-static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+static int e1000_vlan_rx_add_vid(struct net_device *netdev,
+ __be16 proto, u16 vid);
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
+ __be16 proto, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);
#ifdef CONFIG_PM
@@ -333,7 +335,7 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
if (!test_bit(vid, adapter->active_vlans)) {
if (hw->mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
- e1000_vlan_rx_add_vid(netdev, vid);
+ e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
adapter->mng_vlan_id = vid;
} else {
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
@@ -341,7 +343,8 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
(vid != old_vid) &&
!test_bit(old_vid, adapter->active_vlans))
- e1000_vlan_rx_kill_vid(netdev, old_vid);
+ e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
+ old_vid);
} else {
adapter->mng_vlan_id = vid;
}
@@ -491,13 +494,20 @@ static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
set_bit(__E1000_DOWN, &adapter->flags);
- /* Only kill reset task if adapter is not resetting */
- if (!test_bit(__E1000_RESETTING, &adapter->flags))
- cancel_work_sync(&adapter->reset_task);
-
cancel_delayed_work_sync(&adapter->watchdog_task);
+
+ /*
+ * Since the watchdog task can reschedule other tasks, we should cancel
+ * it first; otherwise we can run into a situation where a work item is
+ * still running after the adapter has been turned down.
+ */
+
cancel_delayed_work_sync(&adapter->phy_info_task);
cancel_delayed_work_sync(&adapter->fifo_stall_task);
+
+ /* Only kill reset task if adapter is not resetting */
+ if (!test_bit(__E1000_RESETTING, &adapter->flags))
+ cancel_work_sync(&adapter->reset_task);
}
void e1000_down(struct e1000_adapter *adapter)
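The reordering above exists because the watchdog handler can queue further work; cancelling it first guarantees nothing gets re-armed mid-teardown. A minimal sketch of that cancellation-ordering pattern, with purely illustrative names (my_dev, my_watchdog_fn, my_phy_fn are not part of the driver):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_dev {
        struct delayed_work watchdog;   /* periodically rearms itself and phy_task */
        struct delayed_work phy_task;
};

static void my_phy_fn(struct work_struct *work)
{
        /* query PHY state */
}

static void my_watchdog_fn(struct work_struct *work)
{
        struct my_dev *dev = container_of(work, struct my_dev, watchdog.work);

        /* The watchdog is the work item that queues further work. */
        schedule_delayed_work(&dev->phy_task, HZ / 2);
        schedule_delayed_work(&dev->watchdog, 2 * HZ);
}

static void my_dev_start(struct my_dev *dev)
{
        INIT_DELAYED_WORK(&dev->watchdog, my_watchdog_fn);
        INIT_DELAYED_WORK(&dev->phy_task, my_phy_fn);
        schedule_delayed_work(&dev->watchdog, 2 * HZ);
}

static void my_dev_stop(struct my_dev *dev)
{
        /* Cancel the work that reschedules others first, so nothing can be
         * re-queued behind our back while the remaining items are cancelled.
         */
        cancel_delayed_work_sync(&dev->watchdog);
        cancel_delayed_work_sync(&dev->phy_task);
}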
@@ -541,21 +551,8 @@ void e1000_down(struct e1000_adapter *adapter)
e1000_clean_all_rx_rings(adapter);
}
-static void e1000_reinit_safe(struct e1000_adapter *adapter)
-{
- while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
- msleep(1);
- mutex_lock(&adapter->mutex);
- e1000_down(adapter);
- e1000_up(adapter);
- mutex_unlock(&adapter->mutex);
- clear_bit(__E1000_RESETTING, &adapter->flags);
-}
-
void e1000_reinit_locked(struct e1000_adapter *adapter)
{
- /* if rtnl_lock is not held the call path is bogus */
- ASSERT_RTNL();
WARN_ON(in_interrupt());
while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
msleep(1);
@@ -809,10 +806,10 @@ static netdev_features_t e1000_fix_features(struct net_device *netdev,
/* Since there is no support for separate Rx/Tx vlan accel
* enable/disable make sure Tx flag is always in same state as Rx.
*/
- if (features & NETIF_F_HW_VLAN_RX)
- features |= NETIF_F_HW_VLAN_TX;
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ features |= NETIF_F_HW_VLAN_CTAG_TX;
else
- features &= ~NETIF_F_HW_VLAN_TX;
+ features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return features;
}
@@ -823,7 +820,7 @@ static int e1000_set_features(struct net_device *netdev,
struct e1000_adapter *adapter = netdev_priv(netdev);
netdev_features_t changed = features ^ netdev->features;
- if (changed & NETIF_F_HW_VLAN_RX)
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX)
e1000_vlan_mode(netdev, features);
if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
@@ -1015,19 +1012,14 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
pci_using_dac = 0;
if ((hw->bus_type == e1000_bus_type_pcix) &&
- !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- /* according to DMA-API-HOWTO, coherent calls will always
- * succeed if the set call did
- */
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
pr_err("No usable DMA config, aborting\n");
goto err_dma;
}
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
}
netdev->netdev_ops = &e1000_netdev_ops;
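dma_set_mask_and_coherent() folds the separate streaming and coherent mask calls into one. A rough probe-time sketch of the 64-bit attempt with 32-bit fallback (my_probe_dma is illustrative; the driver additionally restricts the 64-bit attempt to PCI-X parts):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int my_probe_dma(struct pci_dev *pdev)
{
        int err;

        /* Prefer 64-bit DMA; fall back to a 32-bit mask if unsupported. */
        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err) {
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "No usable DMA configuration\n");
                        return err;
                }
        }
        return 0;
}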
@@ -1058,9 +1050,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (hw->mac_type >= e1000_82543) {
netdev->hw_features = NETIF_F_SG |
NETIF_F_HW_CSUM |
- NETIF_F_HW_VLAN_RX;
- netdev->features = NETIF_F_HW_VLAN_TX |
- NETIF_F_HW_VLAN_FILTER;
+ NETIF_F_HW_VLAN_CTAG_RX;
+ netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
}
if ((hw->mac_type >= e1000_82544) &&
@@ -1318,7 +1310,6 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
e1000_irq_disable(adapter);
spin_lock_init(&adapter->stats_lock);
- mutex_init(&adapter->mutex);
set_bit(__E1000_DOWN, &adapter->flags);
@@ -1442,6 +1433,10 @@ static int e1000_close(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ int count = E1000_CHECK_RESET_COUNT;
+
+ while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
+ usleep_range(10000, 20000);
WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
e1000_down(adapter);
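The new wait in e1000_close() polls the reset flag a bounded number of times (E1000_CHECK_RESET_COUNT iterations) before taking the interface down. The same bounded-poll idea, reduced to a generic helper (wait_for_bit_clear is illustrative):

#include <linux/bitops.h>
#include <linux/delay.h>

/* Wait, for at most 'retries' polls, for a flag bit such as a RESETTING
 * marker to clear before proceeding with teardown. */
static void wait_for_bit_clear(unsigned long *flags, int bit, int retries)
{
        while (test_bit(bit, flags) && retries--)
                usleep_range(10000, 20000);     /* 10-20 ms per poll */
}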
@@ -1457,7 +1452,8 @@ static int e1000_close(struct net_device *netdev)
if ((hw->mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
!test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
- e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
+ e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
+ adapter->mng_vlan_id);
}
return 0;
@@ -1516,8 +1512,6 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
if (!txdr->desc) {
setup_tx_desc_die:
vfree(txdr->buffer_info);
- e_err(probe, "Unable to allocate memory for the Tx descriptor "
- "ring\n");
return -ENOMEM;
}
@@ -1707,10 +1701,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
GFP_KERNEL);
-
if (!rxdr->desc) {
- e_err(probe, "Unable to allocate memory for the Rx descriptor "
- "ring\n");
setup_rx_desc_die:
vfree(rxdr->buffer_info);
return -ENOMEM;
@@ -1729,8 +1720,6 @@ setup_rx_desc_die:
if (!rxdr->desc) {
dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
olddma);
- e_err(probe, "Unable to allocate memory for the Rx "
- "descriptor ring\n");
goto setup_rx_desc_die;
}
@@ -2333,11 +2322,8 @@ static void e1000_update_phy_info_task(struct work_struct *work)
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter,
phy_info_task.work);
- if (test_bit(__E1000_DOWN, &adapter->flags))
- return;
- mutex_lock(&adapter->mutex);
+
e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
- mutex_unlock(&adapter->mutex);
}
/**
@@ -2353,9 +2339,6 @@ static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
struct net_device *netdev = adapter->netdev;
u32 tctl;
- if (test_bit(__E1000_DOWN, &adapter->flags))
- return;
- mutex_lock(&adapter->mutex);
if (atomic_read(&adapter->tx_fifo_stall)) {
if ((er32(TDT) == er32(TDH)) &&
(er32(TDFT) == er32(TDFH)) &&
@@ -2376,7 +2359,6 @@ static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
schedule_delayed_work(&adapter->fifo_stall_task, 1);
}
}
- mutex_unlock(&adapter->mutex);
}
bool e1000_has_link(struct e1000_adapter *adapter)
@@ -2430,10 +2412,6 @@ static void e1000_watchdog(struct work_struct *work)
struct e1000_tx_ring *txdr = adapter->tx_ring;
u32 link, tctl;
- if (test_bit(__E1000_DOWN, &adapter->flags))
- return;
-
- mutex_lock(&adapter->mutex);
link = e1000_has_link(adapter);
if ((netif_carrier_ok(netdev)) && link)
goto link_up;
@@ -2524,7 +2502,7 @@ link_up:
adapter->tx_timeout_count++;
schedule_work(&adapter->reset_task);
/* exit immediately since reset is imminent */
- goto unlock;
+ return;
}
}
@@ -2552,9 +2530,6 @@ link_up:
/* Reschedule the task */
if (!test_bit(__E1000_DOWN, &adapter->flags))
schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
-
-unlock:
- mutex_unlock(&adapter->mutex);
}
enum latency_range {
@@ -2707,14 +2682,13 @@ static int e1000_tso(struct e1000_adapter *adapter,
u32 cmd_length = 0;
u16 ipcse = 0, tucse, mss;
u8 ipcss, ipcso, tucss, tucso, hdr_len;
- int err;
if (skb_is_gso(skb)) {
- if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (err)
- return err;
- }
+ int err;
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
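skb_cow_head() replaces the open-coded skb_header_cloned()/pskb_expand_head() pair: it makes the header portion of the skb writable, returning 0 on success or a negative errno. A small sketch of how a TSO path might use it (my_prepare_tso is illustrative):

#include <linux/skbuff.h>
#include <linux/tcp.h>

/* Returns the TCP/IP header length for a GSO skb, 0 for non-GSO skbs,
 * or a negative errno if the headers could not be made writable. */
static int my_prepare_tso(struct sk_buff *skb)
{
        int err;

        if (!skb_is_gso(skb))
                return 0;

        /* Make sure the headers are private before editing them. */
        err = skb_cow_head(skb, 0);
        if (err < 0)
                return err;

        return skb_transport_offset(skb) + tcp_hdrlen(skb);
}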
@@ -3131,11 +3105,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
*/
tx_ring = adapter->tx_ring;
- if (unlikely(skb->len <= 0)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
* packets may get corrupted during padding by HW.
* To WA this issue, pad all small packets manually.
@@ -3503,10 +3472,8 @@ static void e1000_reset_task(struct work_struct *work)
struct e1000_adapter *adapter =
container_of(work, struct e1000_adapter, reset_task);
- if (test_bit(__E1000_DOWN, &adapter->flags))
- return;
e_err(drv, "Reset adapter\n");
- e1000_reinit_safe(adapter);
+ e1000_reinit_locked(adapter);
}
/**
@@ -3920,8 +3887,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
" next_to_watch <%x>\n"
" jiffies <%lx>\n"
" next_to_watch.status <%x>\n",
- (unsigned long)((tx_ring - adapter->tx_ring) /
- sizeof(struct e1000_tx_ring)),
+ (unsigned long)(tx_ring - adapter->tx_ring),
readl(hw->hw_addr + tx_ring->tdh),
readl(hw->hw_addr + tx_ring->tdt),
tx_ring->next_to_use,
@@ -4006,7 +3972,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
if (status & E1000_RXD_STAT_VP) {
u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
- __vlan_hwaccel_put_tag(skb, vid);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
}
napi_gro_receive(&adapter->napi, skb);
}
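Since the VLAN layer became protocol-aware in 3.10, __vlan_hwaccel_put_tag() takes the tag protocol explicitly; a driver that only strips 802.1Q tags passes htons(ETH_P_8021Q) with the TCI. A receive-side sketch (my_rx_vlan is illustrative):

#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>

static void my_rx_vlan(struct napi_struct *napi, struct sk_buff *skb,
                       bool tagged, u16 tci)
{
        if (tagged)
                /* Record protocol + TCI for the stack; no header is inserted. */
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);

        napi_gro_receive(napi, skb);
}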
@@ -4792,7 +4758,7 @@ static void __e1000_vlan_mode(struct e1000_adapter *adapter,
u32 ctrl;
ctrl = er32(CTRL);
- if (features & NETIF_F_HW_VLAN_RX) {
+ if (features & NETIF_F_HW_VLAN_CTAG_RX) {
/* enable VLAN tag insert/strip */
ctrl |= E1000_CTRL_VME;
} else {
@@ -4844,7 +4810,8 @@ static void e1000_vlan_mode(struct net_device *netdev,
e1000_irq_enable(adapter);
}
-static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_add_vid(struct net_device *netdev,
+ __be16 proto, u16 vid)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -4869,7 +4836,8 @@ static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
return 0;
}
-static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
+ __be16 proto, u16 vid)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -4903,7 +4871,7 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter)
e1000_vlan_filter_on_off(adapter, true);
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
- e1000_vlan_rx_add_vid(adapter->netdev, vid);
+ e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}
int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
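The added __be16 argument matches the updated net_device_ops callbacks, which now receive the VLAN protocol alongside the VID. A skeleton of the new callback shape (my_vlan_rx_add_vid and my_netdev_ops are illustrative, not driver code):

#include <linux/netdevice.h>
#include <linux/if_ether.h>

static int my_vlan_rx_add_vid(struct net_device *netdev,
                              __be16 proto, u16 vid)
{
        /* Only CTAG (802.1Q) offload is advertised, so proto is expected to
         * be htons(ETH_P_8021Q) here; program the hardware filter for 'vid'. */
        return 0;
}

static const struct net_device_ops my_netdev_ops = {
        .ndo_vlan_rx_add_vid = my_vlan_rx_add_vid,
        /* .ndo_vlan_rx_kill_vid takes the same (netdev, proto, vid) form. */
};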
@@ -4970,6 +4938,11 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
netif_device_detach(netdev);
if (netif_running(netdev)) {
+ int count = E1000_CHECK_RESET_COUNT;
+
+ while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
+ usleep_range(10000, 20000);
+
WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
e1000_down(adapter);
}