author     Linus Torvalds <torvalds@linux-foundation.org>   2009-01-15 16:53:15 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-01-15 16:53:15 -0800
commit     3feeba1e53f54f726a39da254a5c41e02530255e (patch)
tree       11be6e023579adb6727884f4cc105c3106a06fb4 /drivers
parent     7e92214b539ea17ccaf0886d140cbba9801a4d40 (diff)
parent     a58c891a53aca81c78f9cbe0572a301042470e96 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (95 commits)
  b44: GFP_DMA skb should not escape from driver
  korina: do not use IRQF_SHARED with IRQF_DISABLED
  korina: do not stop queue here
  korina: fix handling tx_chain_tail
  korina: do tx at the right position
  korina: do schedule napi after testing for it
  korina: rework korina_rx() for use with napi
  korina: disable napi on close and restart
  korina: reset resource buffer size to 1536
  korina: fix usage of driver_data
  bnx2x: First slow path interrupt race
  bnx2x: MTU Filter
  bnx2x: Indirection table initialization index
  bnx2x: Missing brackets
  bnx2x: Fixing the doorbell size
  bnx2x: Endianness issues
  bnx2x: VLAN tagged packets without VLAN offload
  bnx2x: Protecting the link change indication
  bnx2x: Flow control updated before reporting the link
  bnx2x: Missing mask when calculating flow control
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/isdn/i4l/isdn_net.c | 9
-rw-r--r--  drivers/net/arm/etherh.c | 2
-rw-r--r--  drivers/net/ax88796.c | 27
-rw-r--r--  drivers/net/b44.c | 4
-rw-r--r--  drivers/net/b44.h | 2
-rw-r--r--  drivers/net/bnx2x.h | 19
-rw-r--r--  drivers/net/bnx2x_main.c | 178
-rw-r--r--  drivers/net/fs_enet/fs_enet-main.c | 1
-rw-r--r--  drivers/net/gianfar.c | 8
-rw-r--r--  drivers/net/ibm_newemac/mal.c | 4
-rw-r--r--  drivers/net/ibm_newemac/mal.h | 2
-rw-r--r--  drivers/net/irda/irda-usb.c | 2
-rw-r--r--  drivers/net/korina.c | 158
-rw-r--r--  drivers/net/netxen/netxen_nic.h | 146
-rw-r--r--  drivers/net/netxen/netxen_nic_ctx.c | 50
-rw-r--r--  drivers/net/netxen/netxen_nic_ethtool.c | 31
-rw-r--r--  drivers/net/netxen/netxen_nic_hw.c | 97
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c | 105
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 203
-rw-r--r--  drivers/net/phy/phy_device.c | 9
-rw-r--r--  drivers/net/ppp_generic.c | 43
-rw-r--r--  drivers/net/sis900.c | 8
-rw-r--r--  drivers/net/usb/hso.c | 4
-rw-r--r--  drivers/net/wan/ixp4xx_hss.c | 6
-rw-r--r--  drivers/net/wireless/Kconfig | 2
-rw-r--r--  drivers/net/wireless/ath5k/base.c | 8
-rw-r--r--  drivers/net/wireless/ath5k/pcu.c | 4
-rw-r--r--  drivers/net/wireless/ath5k/reg.h | 2
-rw-r--r--  drivers/net/wireless/ath9k/Kconfig | 1
-rw-r--r--  drivers/net/wireless/ath9k/main.c | 4
-rw-r--r--  drivers/net/wireless/ath9k/xmit.c | 48
-rw-r--r--  drivers/net/wireless/b43/main.c | 2
-rw-r--r--  drivers/net/wireless/b43legacy/main.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-commands.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-hcmd.c | 1
-rw-r--r--  drivers/net/wireless/libertas_tf/main.c | 2
-rw-r--r--  drivers/net/wireless/orinoco/orinoco.c | 28
-rw-r--r--  drivers/net/wireless/orinoco/orinoco_cs.c | 1
-rw-r--r--  drivers/net/wireless/p54/p54common.c | 34
-rw-r--r--  drivers/net/wireless/p54/p54usb.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500usb.c | 23
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00dev.c | 8
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00leds.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00lib.h | 11
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00queue.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00usb.c | 6
-rw-r--r--  drivers/net/wireless/rt2x00/rt73usb.c | 1
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8180_dev.c | 1
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8187_dev.c | 5
50 files changed, 765 insertions, 557 deletions
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 7c5f97033b9..cb8943da4f1 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -292,7 +292,9 @@ isdn_net_unbind_channel(isdn_net_local * lp)
lp->dialstate = 0;
dev->rx_netdev[isdn_dc2minor(lp->isdn_device, lp->isdn_channel)] = NULL;
dev->st_netdev[isdn_dc2minor(lp->isdn_device, lp->isdn_channel)] = NULL;
- isdn_free_channel(lp->isdn_device, lp->isdn_channel, ISDN_USAGE_NET);
+ if (lp->isdn_device != -1 && lp->isdn_channel != -1)
+ isdn_free_channel(lp->isdn_device, lp->isdn_channel,
+ ISDN_USAGE_NET);
lp->flags &= ~ISDN_NET_CONNECTED;
lp->isdn_device = -1;
lp->isdn_channel = -1;
@@ -2513,7 +2515,6 @@ static const struct net_device_ops isdn_netdev_ops = {
.ndo_stop = isdn_net_close,
.ndo_do_ioctl = isdn_net_ioctl,
- .ndo_validate_addr = NULL,
.ndo_start_xmit = isdn_net_start_xmit,
.ndo_get_stats = isdn_net_get_stats,
.ndo_tx_timeout = isdn_net_tx_timeout,
@@ -2528,12 +2529,8 @@ static void _isdn_setup(struct net_device *dev)
ether_setup(dev);
- dev->flags = IFF_NOARP | IFF_POINTOPOINT;
/* Setup the generic properties */
- dev->mtu = 1500;
dev->flags = IFF_NOARP|IFF_POINTOPOINT;
- dev->type = ARPHRD_ETHER;
- dev->addr_len = ETH_ALEN;
dev->header_ops = NULL;
dev->netdev_ops = &isdn_netdev_ops;
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c
index 745ac188bab..d15d8b79d8e 100644
--- a/drivers/net/arm/etherh.c
+++ b/drivers/net/arm/etherh.c
@@ -646,7 +646,7 @@ static const struct net_device_ops etherh_netdev_ops = {
.ndo_get_stats = ei_get_stats,
.ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_addr = eth_set_mac_addr,
+ .ndo_set_mac_address = eth_set_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll,
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 337488ec707..a4eb6c40678 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -37,7 +37,10 @@ static int phy_debug = 0;
#define __ei_open ax_ei_open
#define __ei_close ax_ei_close
#define __ei_poll ax_ei_poll
+#define __ei_start_xmit ax_ei_start_xmit
#define __ei_tx_timeout ax_ei_tx_timeout
+#define __ei_get_stats ax_ei_get_stats
+#define __ei_set_multicast_list ax_ei_set_multicast_list
#define __ei_interrupt ax_ei_interrupt
#define ____alloc_ei_netdev ax__alloc_ei_netdev
#define __NS8390_init ax_NS8390_init
@@ -623,6 +626,23 @@ static void ax_eeprom_register_write(struct eeprom_93cx6 *eeprom)
}
#endif
+static const struct net_device_ops ax_netdev_ops = {
+ .ndo_open = ax_open,
+ .ndo_stop = ax_close,
+ .ndo_do_ioctl = ax_ioctl,
+
+ .ndo_start_xmit = ax_ei_start_xmit,
+ .ndo_tx_timeout = ax_ei_tx_timeout,
+ .ndo_get_stats = ax_ei_get_stats,
+ .ndo_set_multicast_list = ax_ei_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ax_ei_poll,
+#endif
+};
+
/* setup code */
static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local)
@@ -738,9 +758,7 @@ static int ax_init_dev(struct net_device *dev, int first_init)
ei_status.get_8390_hdr = &ax_get_8390_hdr;
ei_status.priv = 0;
- dev->open = ax_open;
- dev->stop = ax_close;
- dev->do_ioctl = ax_ioctl;
+ dev->netdev_ops = &ax_netdev_ops;
dev->ethtool_ops = &ax_ethtool_ops;
ax->msg_enable = NETIF_MSG_LINK;
@@ -753,9 +771,6 @@ static int ax_init_dev(struct net_device *dev, int first_init)
ax->mii.mdio_write = ax_phy_write;
ax->mii.dev = dev;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = ax_ei_poll;
-#endif
ax_NS8390_init(dev, 0);
if (first_init)
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 5ae131c147f..c38512ebcea 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -679,6 +679,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
dev_kfree_skb_any(skb);
return -ENOMEM;
}
+ bp->force_copybreak = 1;
}
rh = (struct rx_header *) skb->data;
@@ -800,7 +801,7 @@ static int b44_rx(struct b44 *bp, int budget)
/* Omit CRC. */
len -= 4;
- if (len > RX_COPY_THRESHOLD) {
+ if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
int skb_size;
skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
if (skb_size < 0)
@@ -2152,6 +2153,7 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
bp = netdev_priv(dev);
bp->sdev = sdev;
bp->dev = dev;
+ bp->force_copybreak = 0;
bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
diff --git a/drivers/net/b44.h b/drivers/net/b44.h
index 7db0c84a795..e678498de6d 100644
--- a/drivers/net/b44.h
+++ b/drivers/net/b44.h
@@ -395,7 +395,7 @@ struct b44 {
u32 rx_pending;
u32 tx_pending;
u8 phy_addr;
-
+ u8 force_copybreak;
struct mii_if_info mii_if;
};
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index fd705d1295a..6fcccef4cf3 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -20,6 +20,11 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define BCM_VLAN 1
+#endif
+
+
/* error/debug prints */
#define DRV_MODULE_NAME "bnx2x"
@@ -78,11 +83,6 @@
#endif
-#ifdef NETIF_F_HW_VLAN_TX
-#define BCM_VLAN 1
-#endif
-
-
#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff)
#define U64_HI(x) (u32)(((u64)(x)) >> 32)
#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
@@ -150,6 +150,9 @@ struct sw_rx_page {
#define PAGES_PER_SGE_SHIFT 0
#define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT)
+#define SGE_PAGE_SIZE PAGE_SIZE
+#define SGE_PAGE_SHIFT PAGE_SHIFT
+#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN(addr)
#define BCM_RX_ETH_PAYLOAD_ALIGN 64
@@ -736,7 +739,7 @@ struct bnx2x {
struct bnx2x_fastpath fp[MAX_CONTEXT];
void __iomem *regview;
void __iomem *doorbells;
-#define BNX2X_DB_SIZE (16*2048)
+#define BNX2X_DB_SIZE (16*BCM_PAGE_SIZE)
struct net_device *dev;
struct pci_dev *pdev;
@@ -801,6 +804,8 @@ struct bnx2x {
#define TPA_ENABLE_FLAG 0x80
#define NO_MCP_FLAG 0x100
#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG)
+#define HW_VLAN_TX_FLAG 0x400
+#define HW_VLAN_RX_FLAG 0x800
int func;
#define BP_PORT(bp) (bp->func % PORT_MAX)
@@ -811,7 +816,7 @@ struct bnx2x {
int pm_cap;
int pcie_cap;
- struct work_struct sp_task;
+ struct delayed_work sp_task;
struct work_struct reset_task;
struct timer_list timer;
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 4be05847f86..7c533797c06 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -38,9 +38,7 @@
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
-#ifdef NETIF_F_HW_VLAN_TX
- #include <linux/if_vlan.h>
-#endif
+#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
@@ -95,6 +93,7 @@ MODULE_PARM_DESC(debug, "default debug msglevel");
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif
+static struct workqueue_struct *bnx2x_wq;
enum bnx2x_board_type {
BCM57710 = 0,
@@ -671,7 +670,8 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
synchronize_irq(bp->pdev->irq);
/* make sure sp_task is not running */
- cancel_work_sync(&bp->sp_task);
+ cancel_delayed_work(&bp->sp_task);
+ flush_workqueue(bnx2x_wq);
}
/* fast path */
@@ -972,7 +972,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
return;
pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
- BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
+ SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
__free_pages(page, PAGES_PER_SGE_SHIFT);
sw_buf->page = NULL;
@@ -1000,7 +1000,7 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
if (unlikely(page == NULL))
return -ENOMEM;
- mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
+ mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
PCI_DMA_FROMDEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
__free_pages(page, PAGES_PER_SGE_SHIFT);
@@ -1096,9 +1096,9 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
struct eth_fast_path_rx_cqe *fp_cqe)
{
struct bnx2x *bp = fp->bp;
- u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
+ u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
le16_to_cpu(fp_cqe->len_on_bd)) >>
- BCM_PAGE_SHIFT;
+ SGE_PAGE_SHIFT;
u16 last_max, last_elem, first_elem;
u16 delta = 0;
u16 i;
@@ -1203,22 +1203,22 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
u16 cqe_idx)
{
struct sw_rx_page *rx_pg, old_rx_pg;
- struct page *sge;
u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
u32 i, frag_len, frag_size, pages;
int err;
int j;
frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
- pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;
+ pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
/* This is needed in order to enable forwarding support */
if (frag_size)
- skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
+ skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
max(frag_size, (u32)len_on_bd));
#ifdef BNX2X_STOP_ON_ERROR
- if (pages > 8*PAGES_PER_SGE) {
+ if (pages >
+ min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
pages, cqe_idx);
BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
@@ -1234,9 +1234,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* FW gives the indices of the SGE as if the ring is an array
(meaning that "next" element will consume 2 indices) */
- frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
+ frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
rx_pg = &fp->rx_page_ring[sge_idx];
- sge = rx_pg->page;
old_rx_pg = *rx_pg;
/* If we fail to allocate a substitute page, we simply stop
@@ -1249,7 +1248,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* Unmap the page as we r going to pass it to the stack */
pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
- BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
+ SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
/* Add one frag and update the appropriate fields in the skb */
skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
@@ -1282,6 +1281,13 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
if (likely(new_skb)) {
/* fix ip xsum and give it to the stack */
/* (no need to map the new skb) */
+#ifdef BCM_VLAN
+ int is_vlan_cqe =
+ (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
+ PARSING_FLAGS_VLAN);
+ int is_not_hwaccel_vlan_cqe =
+ (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
+#endif
prefetch(skb);
prefetch(((char *)(skb)) + 128);
@@ -1306,6 +1312,12 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
struct iphdr *iph;
iph = (struct iphdr *)skb->data;
+#ifdef BCM_VLAN
+ /* If there is no Rx VLAN offloading -
+ take VLAN tag into an account */
+ if (unlikely(is_not_hwaccel_vlan_cqe))
+ iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
+#endif
iph->check = 0;
iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
}
@@ -1313,9 +1325,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
if (!bnx2x_fill_frag_skb(bp, fp, skb,
&cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
- if ((bp->vlgrp != NULL) &&
- (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
- PARSING_FLAGS_VLAN))
+ if ((bp->vlgrp != NULL) && is_vlan_cqe &&
+ (!is_not_hwaccel_vlan_cqe))
vlan_hwaccel_receive_skb(skb, bp->vlgrp,
le16_to_cpu(cqe->fast_path_cqe.
vlan_tag));
@@ -1355,11 +1366,23 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
rx_prods.cqe_prod = rx_comp_prod;
rx_prods.sge_prod = rx_sge_prod;
+ /*
+ * Make sure that the BD and SGE data is updated before updating the
+ * producers since FW might read the BD/SGE right after the producer
+ * is updated.
+ * This is only applicable for weak-ordered memory model archs such
+ * as IA-64. The following barrier is also mandatory since FW will
+ * assumes BDs must have buffers.
+ */
+ wmb();
+
for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
REG_WR(bp, BAR_TSTRORM_INTMEM +
TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
((u32 *)&rx_prods)[i]);
+ mmiowb(); /* keep prod updates ordered */
+
DP(NETIF_MSG_RX_STATUS,
"Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
bd_prod, rx_comp_prod, rx_sge_prod);
@@ -1415,7 +1438,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
" queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
cqe_fp_flags, cqe->fast_path_cqe.status_flags,
- cqe->fast_path_cqe.rss_hash_result,
+ le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
le16_to_cpu(cqe->fast_path_cqe.pkt_len));
@@ -1547,7 +1570,7 @@ reuse_rx:
}
#ifdef BCM_VLAN
- if ((bp->vlgrp != NULL) &&
+ if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
PARSING_FLAGS_VLAN))
vlan_hwaccel_receive_skb(skb, bp->vlgrp,
@@ -1580,7 +1603,6 @@ next_cqe:
/* Update producers */
bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
fp->rx_sge_prod);
- mmiowb(); /* keep prod updates ordered */
fp->rx_pkt += rx_pkt;
fp->rx_calls++;
@@ -1660,7 +1682,7 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
if (unlikely(status & 0x1)) {
- schedule_work(&bp->sp_task);
+ queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
status &= ~0x1;
if (!status)
@@ -1887,7 +1909,8 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
- switch (bp->link_vars.ieee_fc) {
+ switch (bp->link_vars.ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
ADVERTISED_Pause);
@@ -1957,10 +1980,11 @@ static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
bnx2x_release_phy_lock(bp);
+ bnx2x_calc_fc_adv(bp);
+
if (bp->link_vars.link_up)
bnx2x_link_report(bp);
- bnx2x_calc_fc_adv(bp);
return rc;
}
@@ -2220,9 +2244,7 @@ static void bnx2x_link_attn(struct bnx2x *bp)
/* Make sure that we are synced with the current statistics */
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
- bnx2x_acquire_phy_lock(bp);
bnx2x_link_update(&bp->link_params, &bp->link_vars);
- bnx2x_release_phy_lock(bp);
if (bp->link_vars.link_up) {
@@ -2471,6 +2493,8 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
if (asserted & ATTN_HARD_WIRED_MASK) {
if (asserted & ATTN_NIG_FOR_FUNC) {
+ bnx2x_acquire_phy_lock(bp);
+
/* save nig interrupt mask */
bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
REG_WR(bp, nig_int_mask_addr, 0);
@@ -2526,8 +2550,10 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
REG_WR(bp, hc_addr, asserted);
/* now set back the mask */
- if (asserted & ATTN_NIG_FOR_FUNC)
+ if (asserted & ATTN_NIG_FOR_FUNC) {
REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
+ bnx2x_release_phy_lock(bp);
+ }
}
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -2795,8 +2821,10 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
static void bnx2x_attn_int(struct bnx2x *bp)
{
/* read local copy of bits */
- u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
- u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
+ u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
+ attn_bits);
+ u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
+ attn_bits_ack);
u32 attn_state = bp->attn_state;
/* look for changed bits */
@@ -2820,7 +2848,7 @@ static void bnx2x_attn_int(struct bnx2x *bp)
static void bnx2x_sp_task(struct work_struct *work)
{
- struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
+ struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
u16 status;
@@ -2844,7 +2872,7 @@ static void bnx2x_sp_task(struct work_struct *work)
if (status & 0x2)
bp->stats_pending = 0;
- bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
+ bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
IGU_INT_NOP, 1);
bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
IGU_INT_NOP, 1);
@@ -2875,7 +2903,7 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
return IRQ_HANDLED;
#endif
- schedule_work(&bp->sp_task);
+ queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
return IRQ_HANDLED;
}
@@ -2892,7 +2920,7 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
do { \
s_lo += a_lo; \
- s_hi += a_hi + (s_lo < a_lo) ? 1 : 0; \
+ s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
} while (0)
/* difference = minuend - subtrahend */
@@ -4496,7 +4524,7 @@ static void bnx2x_init_context(struct bnx2x *bp)
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
- int port = BP_PORT(bp);
+ int func = BP_FUNC(bp);
int i;
if (!is_multi(bp))
@@ -4505,10 +4533,8 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
REG_WR8(bp, BAR_TSTRORM_INTMEM +
- TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
- i % bp->num_queues);
-
- REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
+ TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
+ BP_CL_ID(bp) + (i % bp->num_queues));
}
static void bnx2x_set_client_config(struct bnx2x *bp)
@@ -4517,12 +4543,12 @@ static void bnx2x_set_client_config(struct bnx2x *bp)
int port = BP_PORT(bp);
int i;
- tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
+ tstorm_client.mtu = bp->dev->mtu;
tstorm_client.statistics_counter_id = BP_CL_ID(bp);
tstorm_client.config_flags =
TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
#ifdef BCM_VLAN
- if (bp->rx_mode && bp->vlgrp) {
+ if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
tstorm_client.config_flags |=
TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
@@ -4531,7 +4557,7 @@ static void bnx2x_set_client_config(struct bnx2x *bp)
if (bp->flags & TPA_ENABLE_FLAG) {
tstorm_client.max_sges_for_packet =
- BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
+ SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
tstorm_client.max_sges_for_packet =
((tstorm_client.max_sges_for_packet +
PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
@@ -4714,10 +4740,11 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
bp->e1hov);
}
- /* Init CQ ring mapping and aggregation size */
- max_agg_size = min((u32)(bp->rx_buf_size +
- 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
- (u32)0xffff);
+ /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
+ max_agg_size =
+ min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
+ SGE_PAGE_SIZE * PAGES_PER_SGE),
+ (u32)0xffff);
for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
@@ -4785,6 +4812,15 @@ static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
bnx2x_init_context(bp);
bnx2x_init_internal(bp, load_code);
bnx2x_init_ind_table(bp);
+ bnx2x_stats_init(bp);
+
+ /* At this point, we are ready for interrupts */
+ atomic_set(&bp->intr_sem, 0);
+
+ /* flush all before enabling interrupts */
+ mb();
+ mmiowb();
+
bnx2x_int_enable(bp);
}
@@ -5134,7 +5170,6 @@ static int bnx2x_init_common(struct bnx2x *bp)
REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
- REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
@@ -5212,6 +5247,7 @@ static int bnx2x_init_common(struct bnx2x *bp)
}
bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
+ REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
/* set NIC mode */
REG_WR(bp, PRS_REG_NIC_MODE, 1);
if (CHIP_IS_E1H(bp))