aboutsummaryrefslogtreecommitdiff
path: root/drivers/net
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/ibm_newemac/Kconfig53
-rw-r--r--drivers/net/ibm_newemac/core.c761
-rw-r--r--drivers/net/ibm_newemac/core.h22
-rw-r--r--drivers/net/ibm_newemac/emac.h21
-rw-r--r--drivers/net/ibm_newemac/mal.c487
-rw-r--r--drivers/net/ibm_newemac/mal.h108
-rw-r--r--drivers/net/ibm_newemac/phy.c188
-rw-r--r--drivers/net/ibm_newemac/rgmii.c2
-rw-r--r--drivers/net/ibm_newemac/tah.c4
-rw-r--r--drivers/net/ibm_newemac/zmii.c2
10 files changed, 1577 insertions, 71 deletions
diff --git a/drivers/net/ibm_newemac/Kconfig b/drivers/net/ibm_newemac/Kconfig
index 78a1628c989..a2490452a80 100644
--- a/drivers/net/ibm_newemac/Kconfig
+++ b/drivers/net/ibm_newemac/Kconfig
@@ -27,6 +27,13 @@ config IBM_NEW_EMAC_RX_COPY_THRESHOLD
depends on IBM_NEW_EMAC
default "256"
+config IBM_EMAC_MAL_QOS_V404
+ bool "VLAN QOS support"
+ depends on IBM_NEW_EMAC && 460SX
+ select VLAN_8021Q
+ help
+ When selected, VLAN QOS support will be enabled.
+
config IBM_NEW_EMAC_RX_SKB_HEADROOM
int "Additional RX skb headroom (bytes)"
depends on IBM_NEW_EMAC
@@ -39,6 +46,17 @@ config IBM_NEW_EMAC_RX_SKB_HEADROOM
If unsure, set to 0.
+config IBM_NEW_EMAC_MASK_CEXT
+ bool "Mask Carrier Extension signals"
+ depends on IBM_NEW_EMAC && APM82181
+ default n
+ help
+ During normal idle TX, continuously send dummy packets to mask
+ the Carrier Extension signals. This creates a separate BD
+ specifically for this purpose.
+
+ If unsure, set to N.
+
config IBM_NEW_EMAC_DEBUG
bool "Debugging"
depends on IBM_NEW_EMAC
@@ -63,6 +81,33 @@ config IBM_NEW_EMAC_EMAC4
bool
default n
+config IBM_NEW_EMAC_INTR_COALESCE
+ bool "Hardware Interrupt coalescing"
+ depends on IBM_NEW_EMAC && (460EX || 460GT || 405EX || 460SX || APM82181)
+ default y
+ help
+ When selected, Ethernet interrupt coalescing is enabled.
+
+config IBM_NEW_EMAC_TX_COAL_COUNT
+ int "TX Coalescence frame count (packets)"
+ depends on IBM_NEW_EMAC_INTR_COALESCE
+ default "16"
+
+config IBM_NEW_EMAC_TX_COAL_TIMER
+ int "TX Coalescence timer (clock ticks)"
+ depends on IBM_NEW_EMAC_INTR_COALESCE
+ default "1000000"
+
+config IBM_NEW_EMAC_RX_COAL_COUNT
+ int "RX Coalescence frame count (packets)"
+ depends on IBM_NEW_EMAC_INTR_COALESCE
+ default "1"
+
+config IBM_NEW_EMAC_RX_COAL_TIMER
+ int "RX Coalescence timer (clock ticks)"
+ depends on IBM_NEW_EMAC_INTR_COALESCE
+ default "1000000"
+
config IBM_NEW_EMAC_NO_FLOW_CTRL
bool
default n
@@ -74,3 +119,11 @@ config IBM_NEW_EMAC_MAL_CLR_ICINTSTAT
config IBM_NEW_EMAC_MAL_COMMON_ERR
bool
default n
+
+config IBM_NEW_EMAC_SYSFS
+ bool "sysfs support for IBM NEW EMAC"
+ depends on IBM_NEW_EMAC
+ default y
+ help
+ When selected, IBM NEW EMAC parameters are exported
+ via /sys interface
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 3fae8755979..fb9049bcbce 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -39,6 +39,7 @@
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <linux/of.h>
+#include <linux/sysctl.h>
#include <asm/processor.h>
#include <asm/io.h>
@@ -46,8 +47,12 @@
#include <asm/uaccess.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
+#include <asm/time.h>
#include "core.h"
+#define SDR0_PERCLK 0x4201
+#define TX_FIFO_SYNC_USEC 20
+
/*
* Lack of dma_unmap_???? calls is intentional.
@@ -146,18 +151,35 @@ static inline void emac_rx_clk_tx(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
+#if defined(CONFIG_460SX)
+ dcri_clrset(SDR0, SDR0_ETH_CFG,
+ 0, 0x80000000 >> dev->cell_index);
+#elif defined(CONFIG_APM82181)
+ dcri_clrset(SDR0, SDR0_ETH_CFG,
+ 0, 0x00000100 >> dev->cell_index);
+#else
dcri_clrset(SDR0, SDR0_MFR,
0, SDR0_MFR_ECS >> dev->cell_index);
#endif
+#endif
}
static inline void emac_rx_clk_default(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
+#if defined(CONFIG_460SX)
+ dcri_clrset(SDR0, SDR0_ETH_CFG,
+ 0x80000000 >> dev->cell_index, 0);
+#elif defined(CONFIG_APM82181)
+ dcri_clrset(SDR0, SDR0_ETH_CFG,
+ 0x00000100 >> dev->cell_index, 0);
+#else
+
dcri_clrset(SDR0, SDR0_MFR,
SDR0_MFR_ECS >> dev->cell_index, 0);
#endif
+#endif
}
/* PHY polling intervals */
@@ -196,6 +218,7 @@ static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
};
static irqreturn_t emac_irq(int irq, void *dev_instance);
+static irqreturn_t wol_irq(int irq, void *dev_instance);
static void emac_clean_tx_ring(struct emac_instance *dev);
static void __emac_set_multicast_list(struct emac_instance *dev);
@@ -247,6 +270,59 @@ static void emac_tx_disable(struct emac_instance *dev)
}
}
+#ifdef CONFIG_IBM_NEW_EMAC_MASK_CEXT
+static void emac_spin_delay(unsigned long spin_usecs)
+{
+ u64 tick_start, tick_end;
+ u64 spin_ticks = spin_usecs*tb_ticks_per_usec;
+ //printk("spin_ticks = %lld\n", spin_ticks);
+
+ tick_start = get_tb();
+ while(1) {
+ tick_end = get_tb();
+ if((tick_end - tick_start) >= spin_ticks)
+ return;
+ }
+}
+
+/* some code duplication here to avoid function calls */
+static inline void emac_start_idlemode(struct emac_instance *dev)
+{
+ u32 perclk;
+ //printk("ibmnewemac: start_idle\n");
+ DBG(dev, "start_idlemode" NL);
+
+ //emac_spin_delay(TX_FIFO_SYNC_USEC); /* Wait for TX FIFO to Sync */
+
+ /* Disable Ethernet Clock */
+ perclk = mfdcri(SDR0, SDR0_PERCLK);
+ mtdcri(SDR0, SDR0_PERCLK, perclk | 0x88000000);
+ /* Write 0 to set rising clock edge next time */
+ perclk = mfdcri(SDR0, SDR0_PERCLK);
+ mtdcri(SDR0, SDR0_PERCLK, perclk & 0x7fffffff);
+
+ //perclk = mfdcri(SDR0, SDR0_PERCLK);
+ //printk("%s:%d - Ethernet TX Clock Disabled perclk=0x%08lx\n", __FUNCTION__, __LINE__, perclk);
+}
+
+static inline void emac_exit_idlemode(struct emac_instance *dev)
+{
+ u32 perclk;
+ DBG(dev, "exit_idlemode" NL);
+
+ /* Enable Ethernet Clock */
+ perclk = mfdcri(SDR0, SDR0_PERCLK);
+ mtdcri(SDR0, SDR0_PERCLK, (perclk & 0xF7ffffff) | 0x80000000);
+ perclk = mfdcri(SDR0, SDR0_PERCLK);
+ /* Write 0 to set rising clock edge next time */
+ mtdcri(SDR0, SDR0_PERCLK, perclk & 0x7fffffff);
+
+ //perclk = mfdcri(SDR0, SDR0_PERCLK);
+ //printk("%s:%d - Ethernet TX Clock Enabled perclk=0x%08lx\n", __FUNCTION__, __LINE__, perclk);
+
+}
+#endif
+
static void emac_rx_enable(struct emac_instance *dev)
{
struct emac_regs __iomem *p = dev->emacp;
@@ -348,12 +424,24 @@ static int emac_reset(struct emac_instance *dev)
DBG(dev, "reset" NL);
if (!dev->reset_failed) {
+#ifdef CONFIG_IBM_NEW_EMAC_MASK_CEXT
+ if (atomic_read(&dev->mask_cext_enable))
+ if (atomic_read(&dev->idle_mode)) {
+ emac_exit_idlemode(dev);
+ atomic_set(&dev->idle_mode, 0);
+ }
+#endif
/* 40x erratum suggests stopping RX channel before reset,
* we stop TX as well
*/
emac_rx_disable(dev);
emac_tx_disable(dev);
}
+#if defined(CONFIG_460SX)
+ dcri_clrset(SDR0, SDR0_ETH_CFG,
+ 0, 0x80000000 >> dev->cell_index);
+ out_be32(&p->mr1, in_be32(&p->mr1) | EMAC_MR1_ILE);
+#endif
#ifdef CONFIG_PPC_DCR_NATIVE
/* Enable internal clock source */
@@ -365,6 +453,11 @@ static int emac_reset(struct emac_instance *dev)
out_be32(&p->mr0, EMAC_MR0_SRST);
while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
--n;
+#if defined(CONFIG_460SX)
+ dcri_clrset(SDR0, 0x4103,
+ 0x80000000 >> dev->cell_index, 0);
+ out_be32(&p->mr1, in_be32(&p->mr1) & ~EMAC_MR1_ILE);
+#endif
#ifdef CONFIG_PPC_DCR_NATIVE
/* Enable external clock source */
@@ -383,6 +476,33 @@ static int emac_reset(struct emac_instance *dev)
}
}
+/* spham: backup code
+static void emac_hash_mc(struct emac_instance *dev)
+{
+ struct emac_regs __iomem *p = dev->emacp;
+ u16 gaht[8] = { 0 };
+ struct dev_mc_list *dmi;
+
+ DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);
+
+ for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
+ int bit;
+ DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
+ dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
+ dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);
+ bit = 255 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 24);
+ gaht[bit >> 5] |= 0x80000000 >> (bit & 0x1f);
+ }
+ out_be32(&p->gaht1, gaht[0]);
+ out_be32(&p->gaht2, gaht[1]);
+ out_be32(&p->gaht3, gaht[2]);
+ out_be32(&p->gaht4, gaht[3]);
+ out_be32(&p->gaht5, gaht[4]);
+ out_be32(&p->gaht6, gaht[5]);
+ out_be32(&p->gaht7, gaht[6]);
+ out_be32(&p->gaht8, gaht[7]);
+}
+*/
static void emac_hash_mc(struct emac_instance *dev)
{
const int regs = EMAC_XAHT_REGS(dev);
@@ -415,7 +535,7 @@ static inline u32 emac_iff2rmr(struct net_device *ndev)
struct emac_instance *dev = netdev_priv(ndev);
u32 r;
- r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
+ r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE | EMAC_RMR_RFP;
if (emac_has_feature(dev, EMAC_FTR_EMAC4))
r |= EMAC4_RMR_BASE;
@@ -430,6 +550,18 @@ static inline u32 emac_iff2rmr(struct net_device *ndev)
else if (ndev->mc_count > 0)
r |= EMAC_RMR_MAE;
+#if defined(CONFIG_APM82181)
+ /*
+ * When Jumbo Frame is not enabled, MJS field has no effect.
+ * So setting MJS when Jumbo Frame is disabled should not
+ * cause any issue.
+ */
+ DBG(dev, "emac_iff2rmr: Current MTU = %d" NL, ndev->mtu);
+ r &= ~EMAC4_RMR_MJS_MASK;
+ r |= EMAC4_RMR_MJS(ndev->mtu);
+ DBG(dev, "emac_iff2rmr: EMAC_RMR = 0x%08x" NL, r);
+#endif
+
return r;
}
@@ -465,7 +597,7 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s
static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
- u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
+ u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR | EMAC_MR1_APP |
EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
DBG2(dev, "__emac4_calc_base_mr1" NL);
@@ -474,6 +606,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_
case 16384:
ret |= EMAC4_MR1_TFS_16K;
break;
+ case 8192:
+ ret |= EMAC4_MR1_TFS_8K;
+ break;
case 4096:
ret |= EMAC4_MR1_TFS_4K;
break;
@@ -489,6 +624,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_
case 16384:
ret |= EMAC4_MR1_RFS_16K;
break;
+ case 8192:
+ ret |= EMAC4_MR1_RFS_8K;
+ break;
case 4096:
ret |= EMAC4_MR1_RFS_4K;
break;
@@ -559,7 +697,11 @@ static int emac_configure(struct emac_instance *dev)
/* Check for full duplex */
else if (dev->phy.duplex == DUPLEX_FULL)
+#if !defined(CONFIG_IBM_NEW_EMAC_MASK_CEXT)
mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
+#else
+ mr1 |= EMAC_MR1_FDE;
+#endif
/* Adjust fifo sizes, mr1 and timeouts based on link speed */
dev->stop_timeout = STOP_TIMEOUT_10;
@@ -626,7 +768,7 @@ static int emac_configure(struct emac_instance *dev)
ndev->dev_addr[5]);
/* VLAN Tag Protocol ID */
- out_be32(&p->vtpid, 0x8100);
+ out_be32(&p->vtpid, 0x07ff);
/* Receive mode register */
r = emac_iff2rmr(ndev);
@@ -984,32 +1126,103 @@ static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
dev->rx_desc[i].data_len = 0;
dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
(i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ dev->rx_desc[i].ctrl |= MAL_RX_CTRL_INTR;
+#endif
}
+#if defined(CONFIG_IBM_EMAC_MAL_QOS_V404)
+ if (dev->rx_vchans) {
+ int v;
+ for ( v = 1; v < dev->rx_vchans; v++ ) {
+ struct emac_instance *vdev = dev->vdev[v];
+ if (vdev->rx_sg_skb) {
+ ++vdev->estats.rx_dropped_resize;
+ dev_kfree_skb(vdev->rx_sg_skb);
+ vdev->rx_sg_skb = NULL;
+ }
+
+ for (i = 0; i < NUM_RX_BUFF; ++i) {
+ if (vdev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
+ ++vdev->estats.rx_dropped_resize;
+
+ vdev->rx_desc[i].data_len = 0;
+ vdev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
+ (i == (NUM_RX_BUFF - 1) ?
+ MAL_RX_CTRL_WRAP : 0);
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ vdev->rx_desc[i].ctrl |= MAL_RX_CTRL_INTR;
+#endif
+ }
+ }
+ }
+#endif
+
/* Reallocate RX ring only if bigger skb buffers are required */
+ DBG(dev, "New rx_skb_size = %d" NL, rx_skb_size);
+ DBG(dev, "Current rx_skb_size = %d" NL, dev->rx_skb_size);
if (rx_skb_size <= dev->rx_skb_size)
goto skip;
-
+ DBG(dev, "Alocating new SKB buffers" NL);
/* Second pass, allocate new skbs */
for (i = 0; i < NUM_RX_BUFF; ++i) {
- struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
+ struct sk_buff *skb;
+
+ /* Try to free mem. before doing new mem. allocation */
+ BUG_ON(!dev->rx_skb[i]);
+ dev_kfree_skb(dev->rx_skb[i]);
+
+ skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
if (!skb) {
+ DBG(dev, "Cannot allocate new SKB entry %d" NL, i);
ret = -ENOMEM;
goto oom;
}
- BUG_ON(!dev->rx_skb[i]);
- dev_kfree_skb(dev->rx_skb[i]);
-
- skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
+ skb_reserve(skb, EMAC_RX_SKB_HEADROOM);
dev->rx_desc[i].data_ptr =
dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
DMA_FROM_DEVICE) + 2;
dev->rx_skb[i] = skb;
}
+
+#if defined(CONFIG_IBM_EMAC_MAL_QOS_V404)
+ if (dev->rx_vchans) {
+ int v;
+ for ( v = 1; v < dev->rx_vchans; v++ ) {
+ struct emac_instance *vdev = dev->vdev[v];
+ for (i = 0; i < NUM_RX_BUFF; ++i) {
+ struct sk_buff *skb =
+ alloc_skb(rx_skb_size, GFP_ATOMIC);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto oom;
+ }
+
+ BUG_ON(!vdev->rx_skb[i]);
+ dev_kfree_skb(vdev->rx_skb[i]);
+
+ skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
+ vdev->rx_desc[i].data_ptr =
+ dma_map_single(&dev->ofdev->dev, skb->data - 2,
+ rx_sync_size,DMA_FROM_DEVICE) + 2;
+ vdev->rx_skb[i] = skb;
+ }
+ }
+ }
+#endif
+
skip:
/* Check if we need to change "Jumbo" bit in MR1 */
- if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
+#if defined(CONFIG_APM82181)
+ /*
+ * Maui supports setting Max Jumbo Frame size
+ * so we need to update it here
+ */
+ if ((new_mtu > ETH_DATA_LEN) || (dev->ndev->mtu > ETH_DATA_LEN)) {
+#else
+ if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
+#endif
/* This is to prevent starting RX channel in emac_rx_enable() */
set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
@@ -1088,6 +1301,27 @@ static void emac_clean_rx_ring(struct emac_instance *dev)
dev_kfree_skb(dev->rx_sg_skb);
dev->rx_sg_skb = NULL;
}
+
+#if defined(CONFIG_IBM_EMAC_MAL_QOS_V404)
+ if (dev->rx_vchans) {
+ int v;
+ for ( v = 1; v < dev->rx_vchans; v++ ) {
+ struct emac_instance *vdev = dev->vdev[v];
+ for (i = 0; i < NUM_RX_BUFF; ++i)
+ if (vdev->rx_skb[i]) {
+ vdev->rx_desc[i].ctrl = 0;
+ dev_kfree_skb(vdev->rx_skb[i]);
+ vdev->rx_skb[i] = NULL;
+ vdev->rx_desc[i].data_ptr = 0;
+ }
+
+ if (vdev->rx_sg_skb) {
+ dev_kfree_skb(vdev->rx_sg_skb);
+ vdev->rx_sg_skb = NULL;
+ }
+ }
+ }
+#endif
}
static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
@@ -1100,13 +1334,16 @@ static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
dev->rx_skb[slot] = skb;
dev->rx_desc[slot].data_len = 0;
- skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
+ skb_reserve(skb, EMAC_RX_SKB_HEADROOM);
dev->rx_desc[slot].data_ptr =
dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
DMA_FROM_DEVICE) + 2;
wmb();
dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
(slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ dev->rx_desc[slot].ctrl |= MAL_RX_CTRL_INTR;
+#endif
return 0;
}
@@ -1139,6 +1376,15 @@ static int emac_open(struct net_device *ndev)
return err;
}
+ if (dev->wol_irq != NO_IRQ) {
+ /* Setup WOL IRQ handler */
+ err = request_irq(dev->wol_irq, wol_irq, 0, "EMAC WOL", dev);
+ if (err) {
+ printk(KERN_ERR "%s: failed to request IRQ %d\n",
+ ndev->name, dev->wol_irq);
+ return err;
+ }
+ }
/* Allocate RX ring */
for (i = 0; i < NUM_RX_BUFF; ++i)
if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
@@ -1147,6 +1393,25 @@ static int emac_open(struct net_device *ndev)
goto oom;
}
+#if defined(CONFIG_IBM_EMAC_MAL_QOS_V404)
+ if (dev->rx_vchans) {
+ int v;
+ /* alloc skb_buff's for the QOS virtual channels */
+ for ( v = 1; v < dev->rx_vchans; v++){
+ for (i = 0; i < NUM_RX_BUFF; ++i) {
+ if (emac_alloc_rx_skb(dev->vdev[v],
+ i,GFP_KERNEL)){
+ printk(KERN_ERR "%s: failed to allocate"
+ " RX virtual ring\n",
+ ndev->name);
+ goto oom;
+ }
+ }
+ dev->vdev[v]->rx_sg_skb = NULL;
+ dev->vdev[v]->rx_slot = 0;
+ }
+ }
+#endif
dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
dev->rx_sg_skb = NULL;
@@ -1193,6 +1458,9 @@ static int emac_open(struct net_device *ndev)
oom:
emac_clean_rx_ring(dev);
free_irq(dev->emac_irq, dev);
+ if (dev->wol_irq != NO_IRQ) {
+ free_irq(dev->wol_irq, dev);
+ }
return -ENOMEM;
}
@@ -1310,6 +1578,8 @@ static int emac_close(struct net_device *ndev)
free_irq(dev->emac_irq, dev);
netif_carrier_off(ndev);
+ if (dev->wol_irq != NO_IRQ)
+ free_irq(dev->wol_irq, dev);
return 0;
}
@@ -1320,7 +1590,10 @@ static inline u16 emac_tx_csum(struct emac_instance *dev,
if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
(skb->ip_summed == CHECKSUM_PARTIAL)) {
++dev->stats.tx_packets_csum;
- return EMAC_TX_CTRL_TAH_CSUM;
+ if (skb_is_gso(skb))
+ return EMAC_TX_CTRL_TAH_SSR0;
+ else
+ return EMAC_TX_CTRL_TAH_CSUM;
}
return 0;
}
@@ -1360,6 +1633,16 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
+#ifdef CONFIG_IBM_NEW_EMAC_MASK_CEXT
+ if (atomic_read(&dev->mask_cext_enable))
+ if (atomic_read(&dev->idle_mode)) {
+ emac_exit_idlemode(dev);
+ atomic_set(&dev->idle_mode, 0);
+ }
+#endif
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ ctrl |= MAL_TX_CTRL_INTR;
+#endif
slot = dev->tx_slot++;
if (dev->tx_slot == NUM_TX_BUFF) {
@@ -1371,7 +1654,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
dev->tx_skb[slot] = skb;
dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
- skb->data, len,
+ skb->data, EMAC_DMA_ALIGN(len),
DMA_TO_DEVICE);
dev->tx_desc[slot].data_len = (u16) len;
wmb();
@@ -1394,6 +1677,9 @@ static inline int emac_xmit_split(struct emac_instance *dev, int slot,
ctrl |= MAL_TX_CTRL_LAST;
if (slot == NUM_TX_BUFF - 1)
ctrl |= MAL_TX_CTRL_WRAP;
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ ctrl |= MAL_TX_CTRL_INTR;
+#endif
dev->tx_skb[slot] = NULL;
dev->tx_desc[slot].data_ptr = pd;
@@ -1423,6 +1709,14 @@ static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
return emac_start_xmit(skb, ndev);
+#ifdef CONFIG_IBM_NEW_EMAC_MASK_CEXT
+ if (atomic_read(&dev->mask_cext_enable))
+ if (atomic_read(&dev->idle_mode)) {
+ emac_exit_idlemode(dev);
+ atomic_set(&dev->idle_mode, 0);
+ }
+#endif
+
len -= skb->data_len;
/* Note, this is only an *estimation*, we can still run out of empty
@@ -1434,13 +1728,16 @@ static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
emac_tx_csum(dev, skb);
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ ctrl |= MAL_TX_CTRL_INTR;
+#endif
slot = dev->tx_slot;
/* skb data */
dev->tx_skb[slot] = NULL;
chunk = min(len, MAL_MAX_TX_SIZE);
dev->tx_desc[slot].data_ptr = pd =
- dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
+ dma_map_single(&dev->ofdev->dev, skb->data, EMAC_DMA_ALIGN(len), DMA_TO_DEVICE);
dev->tx_desc[slot].data_len = (u16) chunk;
len -= chunk;
if (unlikely(len))
@@ -1481,6 +1778,7 @@ static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
*/
while (slot != dev->tx_slot) {
dev->tx_desc[slot].ctrl = 0;
+ dev->tx_desc[slot].ctrl |= (slot == (NUM_TX_BUFF - 1) ? MAL_TX_CTRL_WRAP : 0);
--dev->tx_cnt;
if (--slot < 0)
slot = NUM_TX_BUFF - 1;
@@ -1554,16 +1852,43 @@ static void emac_poll_tx(void *param)
if (--dev->tx_cnt)
goto again;
- }
+#ifdef CONFIG_IBM_NEW_EMAC_MASK_CEXT
+ else {
+ DBG(dev, "Testing for idle... " NL);
+ if (atomic_read(&dev->mask_cext_enable)) {
+ if (!atomic_read(&dev->idle_mode)) {
+ DBG(dev, "Entering idle mode" NL);
+ emac_start_idlemode(dev);
+ atomic_set(&dev->idle_mode, 1);
+ } else
+ DBG(dev, "Already In Idle Mode" NL);
+
+ }
+ }
+#endif
+ }
+
if (n) {
dev->ack_slot = slot;
if (netif_queue_stopped(dev->ndev) &&
dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
netif_wake_queue(dev->ndev);
-
DBG2(dev, "tx %d pkts" NL, n);
}
- }
+ }
+#ifdef CONFIG_IBM_NEW_EMAC_MASK_CEXT
+ else {
+ DBG(dev, "Testing for idle... " NL);
+ if (atomic_read(&dev->mask_cext_enable)) {
+ if (!atomic_read(&dev->idle_mode)) {
+ DBG(dev, "Entering idle mode" NL);
+ emac_start_idlemode(dev);
+ atomic_set(&dev->idle_mode, 1);
+ } else
+ DBG(dev, "Already In Idle Mode" NL);
+ }
+ }
+#endif
netif_tx_unlock_bh(dev->ndev);
}
@@ -1575,13 +1900,17 @@ static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
DBG2(dev, "recycle %d %d" NL, slot, len);
if (len)
- dma_map_single(&dev->ofdev->dev, skb->data - 2,
- EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
+ dev->rx_desc[slot].data_ptr =
+ dma_map_single(&dev->ofdev->dev, skb->data - 2,
+ EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE) + 2;
dev->rx_desc[slot].data_len = 0;
wmb();
dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
(slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ dev->rx_desc[slot].ctrl |= MAL_RX_CTRL_INTR;
+#endif
}
static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
@@ -1685,11 +2014,11 @@ static int emac_poll_rx(void *param, int budget)
if (len && len < EMAC_RX_COPY_THRESH) {
struct sk_buff *copy_skb =
- alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
+ alloc_skb(len + EMAC_RX_SKB_HEADROOM, GFP_ATOMIC);
if (unlikely(!copy_skb))
goto oom;
- skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
+ skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM);
cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
len + 2);
emac_recycle_rx_skb(dev, slot, len);
@@ -1865,6 +2194,11 @@ static irqreturn_t emac_irq(int irq, void *dev_instance)
return IRQ_HANDLED;
}
+static irqreturn_t wol_irq(int irq, void *dev_instance)
+{
+ return IRQ_HANDLED;
+}
+
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
struct emac_instance *dev = netdev_priv(ndev);
@@ -2092,11 +2426,11 @@ static void *emac_dump_regs(struct emac_instance *dev, void *buf)
hdr->index = dev->cell_index;
if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
hdr->version = EMAC4_ETHTOOL_REGS_VER;
- memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
+ memcpy(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev));
} else {
hdr->version = EMAC_ETHTOOL_REGS_VER;
- memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
+ memcpy(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev));
}
}
@@ -2185,6 +2519,52 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev,
info->n_stats = emac_ethtool_get_stats_count(ndev);
info->regdump_len = emac_ethtool_get_regs_len(ndev);
}
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+static int emac_ethtool_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct emac_instance *ei = netdev_priv(dev);
+ /* clean up */
+ memset(ec, 0, sizeof(*ec));
+
+ /* Update with current status */
+ ec->rx_coalesce_usecs = (ei->mal->coales_param[0].rx_time / ei->plb_bus_freq);
+ ec->rx_max_coalesced_frames = ei->mal->coales_param[0].rx_count;
+
+ ec->tx_coalesce_usecs = (ei->mal->coales_param[0].tx_time / ei->plb_bus_freq);
+ ec->tx_max_coalesced_frames = ei->mal->coales_param[0].tx_count;
+ return 0;
+}
+
+static int emac_ethtool_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct emac_instance *ei = netdev_priv(dev);
+
+ ei->mal->coales_param[0].tx_count = (ec->tx_max_coalesced_frames & COAL_FRAME_MASK);
+ ei->mal->coales_param[1].tx_count = (ec->tx_max_coalesced_frames & COAL_FRAME_MASK);
+ ei->mal->coales_param[2].tx_count = (ec->tx_max_coalesced_frames & COAL_FRAME_MASK);
+ ei->mal->coales_param[3].tx_count = (ec->tx_max_coalesced_frames & COAL_FRAME_MASK);
+
+ ei->mal->coales_param[0].rx_count = (ec->rx_max_coalesced_frames & COAL_FRAME_MASK);
+ ei->mal->coales_param[1].rx_count = (ec->rx_max_coalesced_frames & COAL_FRAME_MASK);
+ ei->mal->coales_param[2].rx_count = (ec->rx_max_coalesced_frames & COAL_FRAME_MASK);
+ ei->mal->coales_param[3].rx_count = (ec->rx_max_coalesced_frames & COAL_FRAME_MASK);
+
+ ei->mal->coales_param[0].tx_time = (ec->tx_coalesce_usecs * ei->plb_bus_freq);
+ ei->mal->coales_param[1].tx_time = (ec->tx_coalesce_usecs * ei->plb_bus_freq);
+ ei->mal->coales_param[2].tx_time = (ec->tx_coalesce_usecs * ei->plb_bus_freq);
+ ei->mal->coales_param[3].tx_time = (ec->tx_coalesce_usecs * ei->plb_bus_freq);
+
+ ei->mal->coales_param[0].rx_time = (ec->rx_coalesce_usecs * ei->plb_bus_freq);
+ ei->mal->coales_param[1].rx_time = (ec->rx_coalesce_usecs * ei->plb_bus_freq);
+ ei->mal->coales_param[2].rx_time = (ec->rx_coalesce_usecs * ei->plb_bus_freq);
+ ei->mal->coales_param[3].rx_time = (ec->rx_coalesce_usecs * ei->plb_bus_freq);
+
+ mal_enable_coal(ei->mal);
+ return 0;
+}
+#endif
static const struct ethtool_ops emac_ethtool_ops = {
.get_settings = emac_ethtool_get_settings,
@@ -2208,8 +2588,256 @@ static const struct ethtool_ops emac_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_tx_csum = ethtool_op_get_tx_csum,
.get_sg = ethtool_op_get_sg,
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ .get_coalesce = emac_ethtool_get_coalesce,
+ .set_coalesce = emac_ethtool_set_coalesce,
+#endif
+
};
+/* sysfs support for IBM NEW EMAC */
+#if defined(CONFIG_IBM_NEW_EMAC_SYSFS)
+
+#if defined(CONFIG_IBM_NEW_EMAC_INTR_COALESCE)
+
+/* Display interrupt coalesce parameter values */
+static ssize_t show_tx_count(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ return sprintf(buf, "%d\n", dev_ins->mal->coales_param[0].tx_count);
+}
+static ssize_t show_rx_count(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ return sprintf(buf, "%d\n", dev_ins->mal->coales_param[0].rx_count);
+}
+static ssize_t show_tx_time(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ return sprintf(buf, "%d\n", dev_ins->mal->coales_param[0].tx_time);
+}
+static ssize_t show_rx_time(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ return sprintf(buf, "%d\n", dev_ins->mal->coales_param[0].rx_time);
+}
+
+static int core_reset(struct emac_instance * dev_ins)
+{
+ mutex_lock(&dev_ins->link_lock);
+ emac_netif_stop(dev_ins);
+ emac_rx_disable(dev_ins);
+ mal_disable_rx_channel(dev_ins->mal, dev_ins->mal_rx_chan);
+
+ if (dev_ins->rx_sg_skb) {
+ ++dev_ins->estats.rx_dropped_resize;
+ dev_kfree_skb(dev_ins->rx_sg_skb);
+ dev_ins->rx_sg_skb = NULL;
+ }
+
+ /* This is to prevent starting RX channel in emac_rx_enable() */
+ set_bit(MAL_COMMAC_RX_STOPPED, &dev_ins->commac.flags);
+
+ emac_full_tx_reset(dev_ins);
+
+ /* Restart RX */
+ clear_bit(MAL_COMMAC_RX_STOPPED, &dev_ins->commac.flags);
+ dev_ins->rx_slot = 0;
+ mal_enable_rx_channel(dev_ins->mal, dev_ins->mal_rx_chan);
+ emac_rx_enable(dev_ins);
+ emac_netif_start(dev_ins);
+ mutex_unlock(&dev_ins->link_lock);
+
+ return 0;
+}
+
+/* Set interrupt coalesce parameter values */
+static ssize_t store_tx_count(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ long tmp = simple_strtol(buf, NULL, 10);
+ dev_ins->mal->coales_param[0].tx_count = tmp;
+
+ mutex_lock(&dev_ins->link_lock);
+ /* Reconfigure MAL interrupt coalesce parameters */
+ mal_enable_coal(dev_ins->mal);
+ mutex_unlock(&dev_ins->link_lock);
+
+ /*
+ * FIXME: It seems that not resetting the interface causes
+ * it to hang after a short period of time
+ */
+ if (netif_running(dev_ins->ndev)) {
+ core_reset(dev_ins);
+ }
+
+ return count;
+}
+static ssize_t store_rx_count(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ long tmp = simple_strtol(buf, NULL, 10);
+ dev_ins->mal->coales_param[0].rx_count = tmp;
+
+ /* Reconfigure MAL interrupt coalesce parameters */
+ mutex_lock(&dev_ins->link_lock);
+ mal_enable_coal(dev_ins->mal);
+ mutex_unlock(&dev_ins->link_lock);
+
+ /*
+ * FIXME: It seems that not resetting the interface causes
+ * it to hang after a short period of time
+ */
+ if (netif_running(dev_ins->ndev)) {
+ core_reset(dev_ins);
+ }
+
+ return count;
+}
+static ssize_t store_tx_time(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ long tmp = simple_strtol(buf, NULL, 10);
+ dev_ins->mal->coales_param[0].tx_time = tmp;
+
+ /* Reconfigure MAL interrupt coalesce parameters */
+ mutex_lock(&dev_ins->link_lock);
+ mal_enable_coal(dev_ins->mal);
+ mutex_unlock(&dev_ins->link_lock);
+
+ /*
+ * FIXME: It seems that not resetting the interface causes
+ * it to hang after a short period of time
+ */
+ if (netif_running(dev_ins->ndev)) {
+ core_reset(dev_ins);
+ }
+
+ return count;
+}
+static ssize_t store_rx_time(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ long tmp = simple_strtol(buf, NULL, 10);
+ dev_ins->mal->coales_param[0].rx_time = tmp;
+
+ /* Reconfigure MAL interrupt coalesce parameters */
+ mutex_lock(&dev_ins->link_lock);
+ mal_enable_coal(dev_ins->mal);
+ mutex_unlock(&dev_ins->link_lock);
+
+ /*
+ * FIXME: It seems that not resetting the interface causes
+ * it to hang after a short period of time
+ */
+ if (netif_running(dev_ins->ndev)) {
+ core_reset(dev_ins);
+ }
+
+ return count;
+}
+
+#endif
+
+#if defined(CONFIG_IBM_NEW_EMAC_MASK_CEXT)
+
+static ssize_t show_emi_fix_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ return sprintf(buf, "%d\n", atomic_read(&dev_ins->mask_cext_enable));
+}
+
+static ssize_t store_emi_fix_enable(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ long tmp = simple_strtol(buf, NULL, 10);
+ tmp = (tmp) ? 1 : 0;
+
+ printk(KERN_INFO "%s EMAC EMI Fix\n", (tmp) ? "Enable" : "Disable");
+ atomic_set(&dev_ins->mask_cext_enable, tmp);
+
+ /* Exit idle mode before return */
+ if (atomic_read(&dev_ins->idle_mode)) {
+ emac_exit_idlemode(dev_ins);
+ atomic_set(&dev_ins->idle_mode, 0);
+ }
+
+ return count;
+}
+
+#endif
+
+#if defined(CONFIG_IBM_NEW_EMAC_INTR_COALESCE)
+static DEVICE_ATTR(coalesce_param_tx_count,
+ S_IRUGO | S_IWUSR, show_tx_count, store_tx_count);
+static DEVICE_ATTR(coalesce_param_rx_count,
+ S_IRUGO | S_IWUSR, show_rx_count, store_rx_count);
+static DEVICE_ATTR(coalesce_param_tx_time,
+ S_IRUGO | S_IWUSR, show_tx_time, store_tx_time);
+static DEVICE_ATTR(coalesce_param_rx_time,
+ S_IRUGO | S_IWUSR, show_rx_time, store_rx_time);
+#endif
+
+#if defined(CONFIG_APM82181)
+ #if defined(CONFIG_IBM_NEW_EMAC_MASK_CEXT)
+static DEVICE_ATTR(emi_fix_enable, S_IRUGO | S_IWUSR,
+ show_emi_fix_enable, store_emi_fix_enable);
+ #endif
+#endif
+
+static struct attribute *ibm_newemac_attr[] = {
+#if defined(CONFIG_IBM_NEW_EMAC_INTR_COALESCE)
+ &dev_attr_coalesce_param_tx_count.attr,
+ &dev_attr_coalesce_param_rx_count.attr,
+ &dev_attr_coalesce_param_tx_time.attr,
+ &dev_attr_coalesce_param_rx_time.attr,
+#endif
+
+#if defined(CONFIG_APM82181)
+ #if defined(CONFIG_IBM_NEW_EMAC_MASK_CEXT)
+ &dev_attr_emi_fix_enable.attr,
+ #endif
+#endif
+ NULL
+};
+
+static const struct attribute_group ibm_newemac_attr_group = {
+ .attrs = ibm_newemac_attr,
+};
+
+#endif
+
+
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
struct emac_instance *dev = netdev_priv(ndev);
@@ -2553,6 +3181,12 @@ static int __devinit emac_init_config(struct emac_instance *dev)
dev->gpcs_address = 0xffffffff;
if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
return -ENXIO;
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ if (emac_read_uint_prop(np->parent->parent, "clock-frequency", &dev->plb_bus_freq, 1))
+ return -ENXIO;
+ /* save as MHz */
+ dev->plb_bus_freq /= 1000000;
+#endif
if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
dev->tah_ph = 0;
if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
@@ -2750,6 +3384,29 @@ static int __devinit emac_probe(struct of_device *ofdev,
dev->blist = blist;
SET_NETDEV_DEV(ndev, &ofdev->dev);
+#if defined(CONFIG_IBM_EMAC_MAL_QOS_V404)
+ dev->vdev_index = 0;
+ dev->vdev[0] = NULL;
+
+ dev->mal_rx_chan = MAX_VCHANS;
+ dev->rx_vchans = dev->mal_rx_chan;
+ for (i = 1; i < dev->rx_vchans; i++) {
+ dev->vdev[i] = (struct emac_instance*)
+ alloc_etherdev(sizeof(struct emac_instance));
+ if (!dev->vdev[i]) {
+ printk(KERN_ERR "emac%s: could not allocate vchannel\n",
+ np->full_name);
+ return -ENOMEM;
+ }
+
+ dev->vdev[i]->vdev_index = i;
+ dev->vdev[i]->rx_vchans = 0; /* we are the virtual channel */
+ dev->vdev[i]->ndev = dev->ndev;
+ dev->vdev[i]->ofdev = dev->ofdev;
+ dev->vdev[i]->mal = dev->mal;
+ }
+#endif
+
/* Initialize some embedded data structures */
mutex_init(&dev->mdio_lock);
mutex_init(&dev->link_lock);
@@ -2813,6 +3470,13 @@ static int __devinit emac_probe(struct of_device *ofdev,
dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
+#if defined(CONFIG_IBM_EMAC_MAL_QOS_V404)
+ for (i = 1; i < dev->rx_vchans; i++) {
+ dev->vdev[i]->rx_skb_size = emac_rx_skb_size(ndev->mtu);
+ dev->vdev[i]->rx_sync_size = emac_rx_sync_size(ndev->mtu);
+ }
+#endif
+
/* Get pointers to BD rings */
dev->tx_desc =
dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
@@ -2827,7 +3491,28 @@ static int __devinit emac_probe(struct of_device *ofdev,
memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
-
+#ifdef CONFIG_IBM_NEW_EMAC_MASK_CEXT
+ /* By default: DISABLE EMI fix */
+ atomic_set(&dev->mask_cext_enable, 0);
+#endif
+
+#if defined(CONFIG_IBM_EMAC_MAL_QOS_V404)
+ /*
+ * On the 440GT and 440EX, the MAL RX active channel 0 (emac0) and
+ * active channel 8 (emac1) have 8 virtual RX channels each for QOS.
+ */
+ for (i = 1; i < dev->rx_vchans; i++) {
+ /* Get pointers to BD RX rings */
+ dev->vdev[i]->rx_desc =
+ dev->mal->bd_virt+mal_rx_bd_offset(dev->mal,
+ (i+dev->mal_rx_chan));
+
+ /* Clean rings */
+ memset(dev->vdev[i]->rx_desc, 0,
+ NUM_RX_BUFF * sizeof(struct mal_descriptor));
+ }
+#endif
+
/* Attach to ZMII, if needed */
if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
(err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
@@ -2857,7 +3542,7 @@ static int __devinit emac_probe(struct of_device *ofdev,
goto err_detach_tah;
if (dev->tah_dev)
- ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+ ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;
ndev->watchdog_timeo = 5 * HZ;
if (emac_phy_supports_gige(dev->phy_mode)) {
ndev->netdev_ops = &emac_gige_netdev_ops;
@@ -2885,7 +3570,6 @@ static int __devinit emac_probe(struct of_device *ofdev,
/* There's a new kid in town ! Let's tell everybody */
wake_up_all(&emac_probe_wait);
-
printk(KERN_INFO "%s: EMAC-%d %s, MAC %pM\n",
ndev->name, dev->cell_index, np->full_name, ndev->dev_addr);
@@ -2895,9 +3579,20 @@ static int __devinit emac_probe(struct of_device *ofdev,
if (dev->phy.address >= 0)
printk("%s: found %s PHY (0x%02x)\n", ndev->name,
dev->phy.def->name, dev->phy.address);
+
emac_dbg_register(dev);
+#if defined(CONFIG_IBM_NEW_EMAC_SYSFS)
+ /* Register sys fs hooks */
+ err = sysfs_create_group(&dev->ndev->dev.kobj,
+ &ibm_newemac_attr_group);
+ if (err) {
+ printk("WARN: %s: failed to create sys interfaces for EMAC-%d %s\n",
+ ndev->name, dev->cell_index, np->full_name);
+ goto err_sysfs;
+ }
+#endif
/* Life is good */
return 0;
@@ -2934,6 +3629,9 @@ static int __devinit emac_probe(struct of_device *ofdev,
*blist = NULL;
wake_up_all(&emac_probe_wait);
}
+#if defined(CONFIG_IBM_NEW_EMAC_SYSFS)
+ err_sysfs:
+#endif
return err;
}
@@ -2945,6 +3643,9 @@ static int __devexit emac_remove(struct of_device *ofdev)
dev_set_drvdata(&ofdev->dev, NULL);
+#if defined(CONFIG_IBM_NEW_EMAC_SYSFS)
+ sysfs_remove_group(&dev->ndev->dev.kobj, &ibm_newemac_attr_group);
+#endif
unregister_netdev(dev->ndev);
flush_scheduled_work();
@@ -2967,6 +3668,14 @@ static int __devexit emac_remove(struct of_device *ofdev)
if (dev->emac_irq != NO_IRQ)
irq_dispose_mapping(dev->emac_irq);
+#if defined(CONFIG_IBM_EMAC_MAL_QOS_V404)
+ if (dev->rx_vchans) {
+ int v;
+ for (v = 1; v < dev->rx_vchans; v++) {
+ kfree(dev->vdev[v]);
+ }
+ }
+#endif
kfree(dev->ndev);
return 0;
diff --git a/drivers/net/ibm_newemac/core.h b/drivers/net/ibm_newemac/core.h
index 18d56c6c423..274514bdfa6 100644
--- a/drivers/net/ibm_newemac/core.h
+++ b/drivers/net/ibm_newemac/core.h
@@ -71,7 +71,7 @@ static inline int emac_rx_size(int mtu)
#define EMAC_DMA_ALIGN(x) ALIGN((x), dma_get_cache_alignment())
#define EMAC_RX_SKB_HEADROOM \
- EMAC_DMA_ALIGN(CONFIG_IBM_NEW_EMAC_RX_SKB_HEADROOM)
+ EMAC_DMA_ALIGN(CONFIG_IBM_NEW_EMAC_RX_SKB_HEADROOM + 2)
/* Size of RX skb for the given MTU */
static inline int emac_rx_skb_size(int mtu)
@@ -161,6 +161,11 @@ struct emac_error_stats {
u64 tx_errors;
};
+
+#if defined(CONFIG_IBM_EMAC_MAL_QOS_V404)
+#define MAX_VCHANS 8 /* MAX virtual channels */
+#endif
+
#define EMAC_ETHTOOL_STATS_COUNT ((sizeof(struct emac_stats) + \
sizeof(struct emac_error_stats)) \
/ sizeof(u64))
@@ -220,6 +225,11 @@ struct emac_instance {
/* OPB bus frequency in Mhz */
u32 opb_bus_freq;
+
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ /* PLB bus frequency in Mhz */
+ u32 plb_bus_freq;
+#endif
/* Cell index within an ASIC (for clk mgmnt) */
u32 cell_index;
@@ -266,6 +276,10 @@ struct emac_instance {
/* Misc
*/
+#ifdef CONFIG_IBM_NEW_EMAC_MASK_CEXT
+ atomic_t idle_mode;
+ atomic_t mask_cext_enable;
+#endif
int reset_failed;
int stop_timeout; /* in us */
int no_mcast;
@@ -273,6 +287,12 @@ struct emac_instance {
int opened;
struct work_struct reset_work;
spinlock_t lock;
+
+#if defined(CONFIG_IBM_EMAC_MAL_QOS_V404)
+ int rx_vchans; /* N rx virtual channels */
+ int vdev_index;
+ struct emac_instance *vdev[MAX_VCHANS]; /* virtual channels */
+#endif
};
/*
diff --git a/drivers/net/ibm_newemac/emac.h b/drivers/net/ibm_newemac/emac.h
index 8a61b597a16..bc54a228556 100644
--- a/drivers/net/ibm_newemac/emac.h
+++ b/drivers/net/ibm_newemac/emac.h
@@ -144,16 +144,18 @@ struct emac_regs {
#define EMAC_MR1_TFS_2K 0x00080000
#define EMAC_MR1_TR0_MULT 0x00008000
#define EMAC_MR1_JPSM 0x00000000
-#define EMAC_MR1_MWSW_001 0x00000000
+#define EMAC_MR1_MWSW_001 0x00001000
#define EMAC_MR1_BASE(opb) (EMAC_MR1_TFS_2K | EMAC_MR1_TR0_MULT)
#define EMAC4_MR1_RFS_2K 0x00100000
#define EMAC4_MR1_RFS_4K 0x00180000
+#define EMAC4_MR1_RFS_8K 0x00200000
#define EMAC4_MR1_RFS_16K 0x00280000
#define EMAC4_MR1_TFS_2K 0x00020000
-#define EMAC4_MR1_TFS_4K 0x00030000
-#define EMAC4_MR1_TFS_16K 0x00050000
+#define EMAC4_MR1_TFS_4K 0x00030000
+#define EMAC4_MR1_TFS_8K 0x00040000
+#define EMAC4_MR1_TFS_16K 0x00050000
#define EMAC4_MR1_TR 0x00008000
#define EMAC4_MR1_MWSW_001 0x00001000
#define EMAC4_MR1_JPSM 0x00000800
@@ -211,6 +213,10 @@ struct emac_regs {
#define EMAC4_RMR_RFAF_64_1024 0x00000006
#define EMAC4_RMR_RFAF_128_2048 0x00000007
#define EMAC4_RMR_BASE EMAC4_RMR_RFAF_128_2048
+#if defined(CONFIG_APM82181)
+#define EMAC4_RMR_MJS_MASK 0x0001fff8
+#define EMAC4_RMR_MJS(s) ((s << 3) & EMAC4_RMR_MJS_MASK)
+#endif
/* EMACx_ISR & EMACx_ISER */
#define EMAC4_ISR_TXPE 0x20000000
@@ -264,7 +270,7 @@ struct emac_regs {
/* EMACx_TRTR */
#define EMAC_TRTR_SHIFT_EMAC4 24
-#define EMAC_TRTR_SHIFT 27
+#define EMAC_TRTR_SHIFT 27
/* EMAC specific TX descriptor control fields (write access) */
#define EMAC_TX_CTRL_GFCS 0x0200
@@ -308,4 +314,11 @@ struct emac_regs {
EMAC_RX_ST_AE | EMAC_RX_ST_BFCS | \
EMAC_RX_ST_PTL | EMAC_RX_ST_ORE | \
EMAC_RX_ST_IRE )
+#define EMAC_TX_CTRL_TAH_SSR0 0x0002
+#define EMAC_TX_CTRL_TAH_SSR1 0x0004
+#define EMAC_TX_CTRL_TAH_SSR2 0x0006
+#define EMAC_TX_CTRL_TAH_SSR3 0x0008
+#define EMAC_TX_CTRL_TAH_SSR4 0x000a
+#define EMAC_TX_CTRL_TAH_SSR5 0x000c
+#define EMAC_TX_CTRL_TAH_CSUM 0x000e
#endif /* __IBM_NEWEMAC_H */
diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c
index 2a2fc17b287..1f79a34bedf 100644
--- a/drivers/net/ibm_newemac/mal.c
+++ b/drivers/net/ibm_newemac/mal.c
@@ -29,8 +29,23 @@
#include "core.h"
#include <asm/dcr-regs.h>
+#include <asm/ppc4xx_ocm.h>
static int mal_count;
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+static char *tx_coal_irqname[] = {
+ "TX0 COAL",
+ "TX1 COAL",
+ "TX2 COAL",
+ "TX3 COAL",
+};
+static char *rx_coal_irqname[] = {
+ "RX0 COAL",
+ "RX1 COAL",
+ "RX2 COAL",
+ "RX3 COAL",
+};
+#endif
int __devinit mal_register_commac(struct mal_instance *mal,
struct mal_commac *commac)
@@ -217,9 +232,176 @@ static inline void mal_disable_eob_irq(struct mal_instance *mal)
MAL_DBG2(mal, "disable_irq" NL);
}
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+
+#if defined(CONFIG_460SX)
+/* Set Tx frame count threshold on all four 460SX coalescing channels.
+ * Bit 22 (SDR0_ICC_FLUSH) is pulsed to flush/latch the counter, then the
+ * register is rewritten with only the frame threshold (val << 23, i.e.
+ * SDR0_ICC_FTHR_SHIFT) to arm it.  A shadow copy is kept because these
+ * registers cannot be reliably read back (see enet_coales_iccrtx).
+ */
+static inline void set_ic_txfthr(struct mal_instance *mal)
+{
+ int reg;
+ int val = mal->coales_param[0].tx_count;
+
+ reg = (val<<23) | (1<<22) ;
+
+ SDR_WRITE(DCRN_SDR0_ICCRTX0, reg); /* set counter */
+ SDR_WRITE(DCRN_SDR0_ICCRTX0,(val<<23)); /* enable counter */
+
+ SDR_WRITE(DCRN_SDR0_ICCRTX1, reg); /* set counter */
+ SDR_WRITE(DCRN_SDR0_ICCRTX1,(val<<23)); /* enable counter */
+
+ SDR_WRITE(DCRN_SDR0_ICCRTX2, reg); /* set counter */
+ SDR_WRITE(DCRN_SDR0_ICCRTX2,(val<<23)); /* enable counter */
+
+ SDR_WRITE(DCRN_SDR0_ICCRTX3, reg); /* set counter */
+ SDR_WRITE(DCRN_SDR0_ICCRTX3,(val<<23)); /* enable counter */
+
+
+ mal->enet_coales_iccrtx = reg;
+}
+/* Set Rx frame count threshold on all four 460SX coalescing channels.
+ * Same write sequence as set_ic_txfthr(): flush via bit 22, then arm with
+ * the threshold in bits [31:23].  Shadowed in enet_coales_iccrrx.
+ */
+static inline void set_ic_rxfthr(struct mal_instance *mal)
+{
+ int reg;
+ int val = mal->coales_param[0].rx_count;
+
+ reg = (val<<23) | (1<<22) ;
+
+ SDR_WRITE(DCRN_SDR0_ICCRRX0,reg); /* set counter */
+ SDR_WRITE(DCRN_SDR0_ICCRRX0,(val<<23)); /* enable counter */
+
+ SDR_WRITE(DCRN_SDR0_ICCRRX1,reg); /* set counter */
+ SDR_WRITE(DCRN_SDR0_ICCRRX1,(val<<23)); /* enable counter */
+
+ SDR_WRITE(DCRN_SDR0_ICCRRX2,reg); /* set counter */
+ SDR_WRITE(DCRN_SDR0_ICCRRX2,(val<<23)); /* enable counter */
+
+ SDR_WRITE(DCRN_SDR0_ICCRRX3,reg); /* set counter */
+ SDR_WRITE(DCRN_SDR0_ICCRRX3,(val<<23)); /* enable counter */
+
+ mal->enet_coales_iccrrx = reg;
+}
+#endif
+
+/*
+ * Program the SoC-specific SDR0 interrupt-coalescing registers from
+ * mal->coales_param[].  Each supported SoC family (405EX, APM82181,
+ * 460EX/GT, 460SX) has its own register layout and channel count, hence
+ * the #if ladder: counters are flushed first, then the per-channel timer
+ * values and frame-count thresholds are written.  Only coales_param[0]
+ * is reported in the summary printk even where four channels exist.
+ */
+inline void mal_enable_coal(struct mal_instance *mal)
+{
+ unsigned int val;
+#if defined(CONFIG_405EX)
+ /* Clear the counters */
+ val = SDR0_ICC_FLUSH0 | SDR0_ICC_FLUSH1;
+ mtdcri(SDR0, DCRN_SDR0_ICCRTX, val);
+ mtdcri(SDR0, DCRN_SDR0_ICCRRX, val);
+
+ /* Set Tx/Rx Timer values */
+ mtdcri(SDR0, DCRN_SDR0_ICCTRTX0, CONFIG_IBM_NEW_EMAC_TX_COAL_TIMER);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRTX1, CONFIG_IBM_NEW_EMAC_TX_COAL_TIMER);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRRX0, CONFIG_IBM_NEW_EMAC_RX_COAL_TIMER);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRRX1, CONFIG_IBM_NEW_EMAC_RX_COAL_TIMER);
+
+ /* Enable the Tx/Rx Coalescing interrupt */
+ val = ((CONFIG_IBM_NEW_EMAC_TX_COAL_COUNT & COAL_FRAME_MASK)
+ << SDR0_ICC_FTHR0_SHIFT) |
+ ((CONFIG_IBM_NEW_EMAC_TX_COAL_COUNT & COAL_FRAME_MASK)
+ << SDR0_ICC_FTHR1_SHIFT);
+ mtdcri(SDR0, DCRN_SDR0_ICCRTX, val);
+
+ val = ((CONFIG_IBM_NEW_EMAC_RX_COAL_COUNT & COAL_FRAME_MASK)
+ << SDR0_ICC_FTHR0_SHIFT) |
+ ((CONFIG_IBM_NEW_EMAC_RX_COAL_COUNT & COAL_FRAME_MASK)
+ << SDR0_ICC_FTHR1_SHIFT);
+
+ mtdcri(SDR0, DCRN_SDR0_ICCRRX, val);
+#elif defined(CONFIG_APM82181)
+ /* Clear the counters */
+ val = SDR0_ICC_FLUSH;
+ mtdcri(SDR0, DCRN_SDR0_ICCRTX0, val);
+ mtdcri(SDR0, DCRN_SDR0_ICCRRX0, val);
+
+ /* Set Tx/Rx Timer values (runtime-tunable via sysfs, not Kconfig) */
+ mtdcri(SDR0, DCRN_SDR0_ICCTRTX0, mal->coales_param[0].tx_time);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRRX0, mal->coales_param[0].rx_time);
+
+ /* Enable the Tx/Rx Coalescing interrupt */
+ val = (mal->coales_param[0].tx_count & COAL_FRAME_MASK)
+ << SDR0_ICC_FTHR_SHIFT;
+ mtdcri(SDR0, DCRN_SDR0_ICCRTX0, val);
+
+ val = (mal->coales_param[0].rx_count & COAL_FRAME_MASK)
+ << SDR0_ICC_FTHR_SHIFT;
+ mtdcri(SDR0, DCRN_SDR0_ICCRRX0, val);
+
+#elif defined(CONFIG_460EX) || defined(CONFIG_460GT)
+ /* Clear the counters (460GT has two extra channels: 2 and 3) */
+ val = SDR0_ICC_FLUSH;
+ mtdcri(SDR0, DCRN_SDR0_ICCRTX0, val);
+ mtdcri(SDR0, DCRN_SDR0_ICCRTX1, val);
+ mtdcri(SDR0, DCRN_SDR0_ICCRRX0, val);
+ mtdcri(SDR0, DCRN_SDR0_ICCRRX1, val);
+#if defined(CONFIG_460GT)
+ mtdcri(SDR0, DCRN_SDR0_ICCRTX2, val);
+ mtdcri(SDR0, DCRN_SDR0_ICCRTX3, val);
+ mtdcri(SDR0, DCRN_SDR0_ICCRRX2, val);
+ mtdcri(SDR0, DCRN_SDR0_ICCRRX3, val);
+#endif
+
+ /* Set Tx/Rx Timer values */
+ mtdcri(SDR0, DCRN_SDR0_ICCTRTX0, CONFIG_IBM_NEW_EMAC_TX_COAL_TIMER);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRTX1, CONFIG_IBM_NEW_EMAC_TX_COAL_TIMER);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRRX0, CONFIG_IBM_NEW_EMAC_RX_COAL_TIMER);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRRX1, CONFIG_IBM_NEW_EMAC_RX_COAL_TIMER);
+#if defined(CONFIG_460GT)
+ mtdcri(SDR0, DCRN_SDR0_ICCTRTX2, CONFIG_IBM_NEW_EMAC_TX_COAL_TIMER);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRTX3, CONFIG_IBM_NEW_EMAC_TX_COAL_TIMER);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRRX2, CONFIG_IBM_NEW_EMAC_RX_COAL_TIMER);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRRX3, CONFIG_IBM_NEW_EMAC_RX_COAL_TIMER);
+#endif
+
+ /* Enable the Tx/Rx Coalescing interrupt */
+ val = (CONFIG_IBM_NEW_EMAC_TX_COAL_COUNT & COAL_FRAME_MASK)
+ << SDR0_ICC_FTHR_SHIFT;
+ mtdcri(SDR0, DCRN_SDR0_ICCRTX0, val);
+ mtdcri(SDR0, DCRN_SDR0_ICCRTX1, val);
+#if defined(CONFIG_460GT)
+ mtdcri(SDR0, DCRN_SDR0_ICCRTX2, val);
+ mtdcri(SDR0, DCRN_SDR0_ICCRTX3, val);
+#endif
+
+ val = (CONFIG_IBM_NEW_EMAC_RX_COAL_COUNT & COAL_FRAME_MASK)
+ << SDR0_ICC_FTHR_SHIFT;
+ mtdcri(SDR0, DCRN_SDR0_ICCRRX0, val);
+ mtdcri(SDR0, DCRN_SDR0_ICCRRX1, val);
+#if defined(CONFIG_460GT)
+ mtdcri(SDR0, DCRN_SDR0_ICCRRX2, val);
+ mtdcri(SDR0, DCRN_SDR0_ICCRRX3, val);
+#endif
+
+#elif defined(CONFIG_460SX)
+ /* 460SX uses per-channel runtime params; thresholds set by helpers */
+ mtdcri(SDR0, DCRN_SDR0_ICCTRTX0, mal->coales_param[0].tx_time);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRTX1, mal->coales_param[1].tx_time);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRTX2, mal->coales_param[2].tx_time);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRTX3, mal->coales_param[3].tx_time);
+
+ mtdcri(SDR0, DCRN_SDR0_ICCTRRX0, mal->coales_param[0].rx_time);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRRX1, mal->coales_param[1].rx_time);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRRX2, mal->coales_param[2].rx_time);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRRX3, mal->coales_param[3].rx_time);
+
+ set_ic_rxfthr(mal);
+ set_ic_txfthr(mal);
+#endif
+ printk(KERN_INFO "MAL: Enabled Interrupt Coal TxCnt: %d RxCnt: %d\n",
+ mal->coales_param[0].tx_count,
+ mal->coales_param[0].rx_count);
+
+ printk(KERN_INFO " TxTimer: %d RxTimer: %d\n",
+ mal->coales_param[0].tx_time,
+ mal->coales_param[0].rx_time);
+}
+#endif
+
static irqreturn_t mal_serr(int irq, void *dev_instance)
{
struct mal_instance *mal = dev_instance;
+ struct list_head *l;
u32 esr = get_mal_dcrn(mal, MAL_ESR);
@@ -256,6 +438,14 @@ static irqreturn_t mal_serr(int irq, void *dev_instance)
"mal%d: system error, OPB (ESR = 0x%08x)\n",
mal->index, esr);
}
+
+
+ list_for_each(l, &mal->poll_list) {
+ struct mal_commac *mc =
+ list_entry(l, struct mal_commac, poll_list);
+ mc->ops->reset(mc->dev);
+ }
+
return IRQ_HANDLED;
}
@@ -309,6 +499,15 @@ static irqreturn_t mal_rxeob(int irq, void *dev_instance)
return IRQ_HANDLED;
}
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+/*
+ * Coalescing IRQ handler: fires when the hardware frame-count/timer
+ * threshold is reached and simply kicks the shared NAPI poll; actual
+ * TX/RX completion work happens in mal_poll().
+ */
+static irqreturn_t mal_coal(int irq, void *dev_instance)
+{
+ struct mal_instance *mal = dev_instance;
+ mal_schedule_poll(mal);
+ return IRQ_HANDLED;
+}
+#endif
+
static irqreturn_t mal_txde(int irq, void *dev_instance)
{
struct mal_instance *mal = dev_instance;
@@ -393,6 +592,9 @@ void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
static int mal_poll(struct napi_struct *napi, int budget)
{
+#if defined(CONFIG_IBM_EMAC_MAL_QOS_V404)
+ int v;
+#endif
struct mal_instance *mal = container_of(napi, struct mal_instance, napi);
struct list_head *l;
int received = 0;
@@ -455,6 +657,32 @@ static int mal_poll(struct napi_struct *napi, int budget)
mc->ops->poll_tx(mc->dev);
}
+#if defined(CONFIG_IBM_EMAC_MAL_QOS_V404)
+	/* Process RX skbs on the QOS virtual channels (channel 0 was
+	 * already handled in the main poll loop above).
+	 */
+ for ( v = 1; v < MAX_VCHANS; v++ ) {
+ list_for_each(l, &mal->poll_list) {
+ struct mal_commac *mc =
+ list_entry(l, struct mal_commac, poll_list);
+ struct emac_instance *dev = mc->dev;
+ int n;
+ if ( v >= dev->rx_vchans ) {
+ continue;
+ }
+ n = mc->ops->poll_rx(dev->vdev[v],budget);
+ if (n) {
+ received += n;
+ budget -= n;
+ if (budget <= 0) {
+ goto more_work;
+ }
+ }
+ }
+
+ }
+#endif
+
more_work:
MAL_DBG2(mal, "poll() %d <- %d" NL, budget, received);
return received;
@@ -516,6 +744,7 @@ void *mal_dump_regs(struct mal_instance *mal, void *buf)
return regs + 1;
}
+
static int __devinit mal_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
@@ -524,9 +753,14 @@ static int __devinit mal_probe(struct of_device *ofdev,
int index = mal_count++;
unsigned int dcr_base;
const u32 *prop;
+ const char *str_prop;
u32 cfg;
unsigned long irqflags;
irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde;
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ int num_phys_chans;
+ int coal_intr_index;
+#endif
mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL);
if (!mal) {
@@ -541,6 +775,13 @@ static int __devinit mal_probe(struct of_device *ofdev,
MAL_DBG(mal, "probe" NL);
+ str_prop = of_get_property(ofdev->node, "descriptor-memory", NULL);
+ if (str_prop && (!strcmp(str_prop,"ocm") || !strcmp(str_prop,"OCM"))) {
+ printk(KERN_INFO
+ "mal%d: descriptor-memory = %s\n", index, str_prop);
+ mal->desc_memory = MAL_DESC_MEM_OCM;
+ }
+
prop = of_get_property(ofdev->node, "num-tx-chans", NULL);
if (prop == NULL) {
printk(KERN_ERR
@@ -609,6 +850,46 @@ static int __devinit mal_probe(struct of_device *ofdev,
goto fail_unmap;
}
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ /* Number of Tx channels is equal to Physical channels */
+ /* Rx channels include Virtual channels so use Tx channels */
+ BUG_ON(mal->num_tx_chans > MAL_MAX_PHYS_CHANNELS);
+ num_phys_chans = mal->num_tx_chans;
+ /* Older revs in 460EX and 460GT have coalesce bug in h/w */
+#if defined(CONFIG_460EX) || defined(CONFIG_460GT)
+ {
+ unsigned int pvr;
+ unsigned short min;
+ pvr = mfspr(SPRN_PVR);
+ min = PVR_MIN(pvr);
+ if (min < 4) {
+ printk(KERN_INFO "PVR %x Intr Coal disabled: H/W bug\n",
+ pvr);
+ mal->coalesce_disabled = 1;
+ }
+ }
+#else
+ mal->coalesce_disabled = 0;
+#endif
+ coal_intr_index = 5;
+
+	/* If the device tree doesn't provide interrupt-coalescing IRQs, fall back to the EOB IRQ */
+ for (i = 0; (i < num_phys_chans) && (mal->coalesce_disabled == 0) ; i++) {
+ mal->txcoal_irq[i] = irq_of_parse_and_map(ofdev->node, coal_intr_index++);
+ if (mal->txcoal_irq[i] == NO_IRQ) {
+ printk(KERN_INFO "MAL: No device tree IRQ for TxCoal%d - disabling coalescing\n", i);
+ mal->coalesce_disabled = 1;
+ }
+ }
+ for (i = 0; (i < num_phys_chans) && (mal->coalesce_disabled == 0); i++) {
+ mal->rxcoal_irq[i] = irq_of_parse_and_map(ofdev->node, coal_intr_index++);
+ if (mal->rxcoal_irq[i] == NO_IRQ) {
+ printk(KERN_INFO "MAL: No device tree IRQ for RxCoal%d - disabling coalescing\n", i);
+ mal->coalesce_disabled = 1;
+ }
+ }
+#endif
+
INIT_LIST_HEAD(&mal->poll_list);
INIT_LIST_HEAD(&mal->list);
spin_lock_init(&mal->lock);
@@ -641,9 +922,25 @@ static int __devinit mal_probe(struct of_device *ofdev,
bd_size = sizeof(struct mal_descriptor) *
(NUM_TX_BUFF * mal->num_tx_chans +
NUM_RX_BUFF * mal->num_rx_chans);
- mal->bd_virt =
- dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
- GFP_KERNEL);
+
+ if (mal->desc_memory == MAL_DESC_MEM_OCM) {
+ mal->bd_virt = ocm_alloc(&mal->bd_phys, bd_size, 4,
+ OCM_NON_CACHED, "mal_descriptors");
+ mal->bd_dma = (u32)mal->bd_phys;
+ }
+
+ if (mal->bd_virt == NULL) {
+ /* Allocate BD on SDRAM in case !MAL_DESC_MEM_OCM or failed OCM alloc */
+ if (mal->desc_memory == MAL_DESC_MEM_OCM){
+ printk(KERN_INFO
+ "mal%d: failed OCM alloc, descriptor-memory = SDRAM\n", index);
+ mal->desc_memory = MAL_DESC_MEM_SDRAM;
+ }
+ mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size,
+ &mal->bd_dma, GFP_KERNEL);
+ }
+
+
if (mal->bd_virt == NULL) {
printk(KERN_ERR
"mal%d: out of memory allocating RX/TX descriptors!\n",
@@ -651,17 +948,25 @@ static int __devinit mal_probe(struct of_device *ofdev,
err = -ENOMEM;
goto fail_unmap;
}
+
memset(mal->bd_virt, 0, bd_size);
+ for (i = 0; i < mal->num_tx_chans; ++i) {
+ if (mal->desc_memory == MAL_DESC_MEM_OCM)
+ set_mal_dcrn(mal, MAL_TXBADDR, (mal->bd_phys >> 32));
- for (i = 0; i < mal->num_tx_chans; ++i)
set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
sizeof(struct mal_descriptor) *
mal_tx_bd_offset(mal, i));
+ }
+
+ for (i = 0; i < mal->num_rx_chans; ++i) {
+ if (mal->desc_memory == MAL_DESC_MEM_OCM)
+ set_mal_dcrn(mal, MAL_RXBADDR, (u32)(mal->bd_phys >> 32));
- for (i = 0; i < mal->num_rx_chans; ++i)
set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma +
sizeof(struct mal_descriptor) *
mal_rx_bd_offset(mal, i));
+ }
if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
irqflags = IRQF_SHARED;
@@ -674,20 +979,65 @@ static int __devinit mal_probe(struct of_device *ofdev,
}
err = request_irq(mal->serr_irq, hdlr_serr, irqflags, "MAL SERR", mal);
- if (err)
- goto fail2;
+ if (err) {
+ mal->serr_irq = NO_IRQ;
+ goto failirq;
+ }
err = request_irq(mal->txde_irq, hdlr_txde, irqflags, "MAL TX DE", mal);
- if (err)
- goto fail3;
- err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
- if (err)
- goto fail4;
+ if (err) {
+ mal->txde_irq = NO_IRQ;
+ goto failirq;
+ }
err = request_irq(mal->rxde_irq, hdlr_rxde, irqflags, "MAL RX DE", mal);
- if (err)
- goto fail5;
- err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
- if (err)
- goto fail6;
+ if (err) {
+ mal->rxde_irq = NO_IRQ;
+ goto failirq;
+ }
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ for (i = 0; (i < num_phys_chans) && (mal->coalesce_disabled == 0); i++) {
+ err = request_irq(mal->txcoal_irq[i],
+ mal_coal, 0, tx_coal_irqname[i], mal);
+ if (err) {
+ printk(KERN_INFO "MAL: TxCoal%d ReqIRQ failed - disabling coalescing\n", i);
+ mal->txcoal_irq[i] = NO_IRQ;
+ mal->coalesce_disabled = 1;
+ break;
+ }
+ }
+ for (i = 0; (i < num_phys_chans) && (mal->coalesce_disabled == 0); i++) {
+ err = request_irq(mal->rxcoal_irq[i],
+ mal_coal, 0, rx_coal_irqname[i], mal);
+ if (err) {
+ printk(KERN_INFO "MAL: RxCoal%d ReqIRQ failed - disabling coalescing\n", i);
+ mal->rxcoal_irq[i] = NO_IRQ;
+ mal->coalesce_disabled = 1;
+ break;
+ }
+ }
+
+ /* Fall back to EOB IRQ if coalesce not supported */
+ if (mal->coalesce_disabled) {
+ /* Clean up any IRQs allocated for Coalescing */
+ for (i = 0; i < num_phys_chans; i++) {
+ if (mal->txcoal_irq[i] != NO_IRQ)
+ free_irq(mal->txcoal_irq[i], mal);
+ if (mal->rxcoal_irq[i] != NO_IRQ)
+ free_irq(mal->rxcoal_irq[i], mal);
+ }
+#endif
+ err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
+ if (err) {
+ mal->txeob_irq = NO_IRQ;
+ goto failirq;
+ }
+ err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
+ if (err) {
+ mal->rxeob_irq = NO_IRQ;
+ goto failirq;
+ }
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ }
+#endif
/* Enable all MAL SERR interrupt sources */
if (mal->version == 2)
@@ -695,6 +1045,31 @@ static int __devinit mal_probe(struct of_device *ofdev,
else
set_mal_dcrn(mal, MAL_IER, MAL1_IER_EVENTS);
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ if (mal->coalesce_disabled == 0) {
+ mal->coales_param[0].tx_count = (CONFIG_IBM_NEW_EMAC_TX_COAL_COUNT & COAL_FRAME_MASK);
+ mal->coales_param[1].tx_count = (CONFIG_IBM_NEW_EMAC_TX_COAL_COUNT & COAL_FRAME_MASK);
+ mal->coales_param[2].tx_count = (CONFIG_IBM_NEW_EMAC_TX_COAL_COUNT & COAL_FRAME_MASK);
+ mal->coales_param[3].tx_count = (CONFIG_IBM_NEW_EMAC_TX_COAL_COUNT & COAL_FRAME_MASK);
+
+ mal->coales_param[0].rx_count = (CONFIG_IBM_NEW_EMAC_RX_COAL_COUNT & COAL_FRAME_MASK);
+ mal->coales_param[1].rx_count = (CONFIG_IBM_NEW_EMAC_RX_COAL_COUNT & COAL_FRAME_MASK);
+ mal->coales_param[2].rx_count = (CONFIG_IBM_NEW_EMAC_RX_COAL_COUNT & COAL_FRAME_MASK);
+ mal->coales_param[3].rx_count = (CONFIG_IBM_NEW_EMAC_RX_COAL_COUNT & COAL_FRAME_MASK);
+
+ mal->coales_param[0].tx_time = CONFIG_IBM_NEW_EMAC_TX_COAL_TIMER;
+ mal->coales_param[1].tx_time = CONFIG_IBM_NEW_EMAC_TX_COAL_TIMER;
+ mal->coales_param[2].tx_time = CONFIG_IBM_NEW_EMAC_TX_COAL_TIMER;
+ mal->coales_param[3].tx_time = CONFIG_IBM_NEW_EMAC_TX_COAL_TIMER;
+
+ mal->coales_param[0].rx_time = CONFIG_IBM_NEW_EMAC_RX_COAL_TIMER;
+ mal->coales_param[1].rx_time = CONFIG_IBM_NEW_EMAC_RX_COAL_TIMER;
+ mal->coales_param[2].rx_time = CONFIG_IBM_NEW_EMAC_RX_COAL_TIMER;
+ mal->coales_param[3].rx_time = CONFIG_IBM_NEW_EMAC_RX_COAL_TIMER;
+
+ mal_enable_coal(mal);
+}
+#endif
/* Enable EOB interrupt */
mal_enable_eob_irq(mal);
@@ -711,16 +1086,35 @@ static int __devinit mal_probe(struct of_device *ofdev,
return 0;
- fail6:
- free_irq(mal->rxde_irq, mal);
- fail5:
- free_irq(mal->txeob_irq, mal);
- fail4:
- free_irq(mal->txde_irq, mal);
- fail3:
- free_irq(mal->serr_irq, mal);
- fail2:
- dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
+ failirq:
+ if (mal->serr_irq != NO_IRQ)
+ free_irq(mal->serr_irq, mal);
+ if (mal->txde_irq != NO_IRQ)
+ free_irq(mal->txde_irq, mal);
+ if (mal->rxde_irq != NO_IRQ)
+ free_irq(mal->rxde_irq, mal);
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ if (mal->coalesce_disabled == 0) {
+ for (i = 0; i < num_phys_chans; i++) {
+ if (mal->txcoal_irq[i] != NO_IRQ)
+ free_irq(mal->txcoal_irq[i], mal);
+ if (mal->rxcoal_irq[i] != NO_IRQ)
+ free_irq(mal->rxcoal_irq[i], mal);
+ }
+ } else {
+#endif
+ if (mal->txeob_irq != NO_IRQ)
+ free_irq(mal->txeob_irq, mal);
+ if (mal->rxeob_irq != NO_IRQ)
+ free_irq(mal->rxeob_irq, mal);
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ }
+#endif
+ if (mal->desc_memory == MAL_DESC_MEM_OCM)
+ ocm_free(mal->bd_virt);
+ else
+ dma_free_coherent(&ofdev->dev, bd_size,
+ mal->bd_virt, mal->bd_dma);
fail_unmap:
dcr_unmap(mal->dcr_host, 0x100);
fail:
@@ -732,6 +1126,10 @@ static int __devinit mal_probe(struct of_device *ofdev,
static int __devexit mal_remove(struct of_device *ofdev)
{
struct mal_instance *mal = dev_get_drvdata(&ofdev->dev);
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ int i;
+ int num_phys_chans;
+#endif
MAL_DBG(mal, "remove" NL);
@@ -748,17 +1146,38 @@ static int __devexit mal_remove(struct of_device *ofdev)
dev_set_drvdata(&ofdev->dev, NULL);
- free_irq(mal->serr_irq, mal);
- free_irq(mal->txde_irq, mal);
- free_irq(mal->txeob_irq, mal);
- free_irq(mal->rxde_irq, mal);
- free_irq(mal->rxeob_irq, mal);
-
+ if (mal->serr_irq != NO_IRQ)
+ free_irq(mal->serr_irq, mal);
+ if (mal->txde_irq != NO_IRQ)
+ free_irq(mal->txde_irq, mal);
+ if (mal->rxde_irq != NO_IRQ)
+ free_irq(mal->rxde_irq, mal);
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ num_phys_chans = mal->num_tx_chans;
+ if (mal->coalesce_disabled == 0) {
+ for (i = 0; i < num_phys_chans; i++) {
+ if (mal->txcoal_irq[i] != NO_IRQ)
+ free_irq(mal->txcoal_irq[i], mal);
+ if (mal->rxcoal_irq[i] != NO_IRQ)
+ free_irq(mal->rxcoal_irq[i], mal);
+ }
+ } else {
+#endif
+ if (mal->txeob_irq != NO_IRQ)
+ free_irq(mal->txeob_irq, mal);
+ if (mal->rxeob_irq != NO_IRQ)
+ free_irq(mal->rxeob_irq, mal);
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ }
+#endif
mal_reset(mal);
mal_dbg_unregister(mal);
- dma_free_coherent(&ofdev->dev,
+ if (mal->desc_memory == MAL_DESC_MEM_OCM)
+ ocm_free(mal->bd_virt);
+ else
+ dma_free_coherent(&ofdev->dev,
sizeof(struct mal_descriptor) *
(NUM_TX_BUFF * mal->num_tx_chans +
NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt,
diff --git a/drivers/net/ibm_newemac/mal.h b/drivers/net/ibm_newemac/mal.h
index 9ededfbf072..a52dd75b1b2 100644
--- a/drivers/net/ibm_newemac/mal.h
+++ b/drivers/net/ibm_newemac/mal.h
@@ -118,10 +118,12 @@
#define MAL_TXCARR 0x05
#define MAL_TXEOBISR 0x06
#define MAL_TXDEIR 0x07
+#define MAL_TXBADDR 0x09
#define MAL_RXCASR 0x10
#define MAL_RXCARR 0x11
#define MAL_RXEOBISR 0x12
#define MAL_RXDEIR 0x13
+#define MAL_RXBADDR 0x15
#define MAL_TXCTPR(n) ((n) + 0x20)
#define MAL_RXCTPR(n) ((n) + 0x40)
#define MAL_RCBS(n) ((n) + 0x60)
@@ -169,7 +171,71 @@ struct mal_descriptor {
#define MAL_TX_CTRL_LAST 0x1000
#define MAL_TX_CTRL_INTR 0x0400
+#define MAL_DESC_MEM_SDRAM 0x0
+#define MAL_DESC_MEM_OCM 0x1
+
+#if defined(CONFIG_405EX)
+#define DCRN_SDR0_ICCRTX 0x430B /* Int coal Tx control register */
+#define DCRN_SDR0_ICCRRX 0x430C /* Int coal Rx control register */
+#define SDR0_ICC_FTHR0_SHIFT 23
+#define SDR0_ICC_FLUSH0 22
+#define SDR0_ICC_FLUWI0 21
+#define SDR0_ICC_FTHR1_SHIFT 12
+#define SDR0_ICC_FLUSH1 11
+#define SDR0_ICC_FLUWI1 10
+#define DCRN_SDR0_ICCTRTX0 0x430D /* Int coal Tx0 count threshold */
+#define DCRN_SDR0_ICCTRTX1 0x430E /* Int coal Tx1 count threshold */
+#define DCRN_SDR0_ICCTRRX0 0x430F /* Int coal Rx0 count threshold */
+#define DCRN_SDR0_ICCTRRX1 0x4310 /* Int coal Rx1 count threshold */
+#define DCRN_SDR0_ICTSRTX0 0x4307 /* Int coal Tx0 timer status*/
+#define DCRN_SDR0_ICTSRTX1 0x4308 /* Int coal Tx1 timer status*/
+#define DCRN_SDR0_ICTSRRX0 0x4309 /* Int coal Rx0 timer status*/
+#define DCRN_SDR0_ICTSRRX1 0x430A /* Int coal Rx1 timer status*/
+#elif defined(CONFIG_APM82181)
+#define DCRN_SDR0_ICCRTX0 0x4410 /* Int coal Tx0 control register */
+#define DCRN_SDR0_ICCRRX0 0x4414 /* Int coal Rx0 control register */
+#define SDR0_ICC_FTHR_SHIFT 23
+#define SDR0_ICC_FLUSH 22
+#define SDR0_ICC_FLUWI 21
+#define DCRN_SDR0_ICCTRTX0 0x4418 /* Int coal Tx0 count threshold */
+#define DCRN_SDR0_ICCTRRX0 0x441C /* Int coal Rx0 count threshold */
+#define DCRN_SDR0_ICTSRTX0 0x4420 /* Int coal Tx0 timer status*/
+#define DCRN_SDR0_ICTSRRX0 0x4424 /* Int coal Rx0 timer status*/
+#elif defined(CONFIG_460EX) || defined(CONFIG_460GT) || defined(CONFIG_460SX)
+#define DCRN_SDR0_ICCRTX0 0x4410 /* Int coal Tx0 control register */
+#define DCRN_SDR0_ICCRTX1 0x4411 /* Int coal Tx1 control register */
+#define DCRN_SDR0_ICCRTX2 0x4412 /* Int coal Tx2 control register */
+#define DCRN_SDR0_ICCRTX3 0x4413 /* Int coal Tx3 control register */
+#define DCRN_SDR0_ICCRRX0 0x4414 /* Int coal Rx0 control register */
+#define DCRN_SDR0_ICCRRX1 0x4415 /* Int coal Rx1 control register */
+#define DCRN_SDR0_ICCRRX2 0x4416 /* Int coal Rx2 control register */
+#define DCRN_SDR0_ICCRRX3 0x4417 /* Int coal Rx3 control register */
+#define SDR0_ICC_FTHR_SHIFT 23
+#define SDR0_ICC_FLUSH 22
+#define SDR0_ICC_FLUWI 21
+#define DCRN_SDR0_ICCTRTX0 0x4418 /* Int coal Tx0 count threshold */
+#define DCRN_SDR0_ICCTRTX1 0x4419 /* Int coal Tx1 count threshold */
+#define DCRN_SDR0_ICCTRTX2 0x441A /* Int coal Tx2 count threshold */
+#define DCRN_SDR0_ICCTRTX3 0x441B /* Int coal Tx3 count threshold */
+#define DCRN_SDR0_ICCTRRX0 0x441C /* Int coal Rx0 count threshold */
+#define DCRN_SDR0_ICCTRRX1 0x441D /* Int coal Rx1 count threshold */
+#define DCRN_SDR0_ICCTRRX2 0x441E /* Int coal Rx2 count threshold */
+#define DCRN_SDR0_ICCTRRX3 0x441F /* Int coal Rx3 count threshold */
+#define DCRN_SDR0_ICTSRTX0 0x4420 /* Int coal Tx0 timer status*/
+#define DCRN_SDR0_ICTSRTX1 0x4421 /* Int coal Tx1 timer status*/
+#define DCRN_SDR0_ICTSRTX2 0x4422 /* Int coal Tx2 timer status*/
+#define DCRN_SDR0_ICTSRTX3 0x4423 /* Int coal Tx3 timer status*/
+#define DCRN_SDR0_ICTSRRX0 0x4424 /* Int coal Rx0 timer status*/
+#define DCRN_SDR0_ICTSRRX1 0x4425 /* Int coal Rx1 timer status*/
+#define DCRN_SDR0_ICTSRRX2 0x4426 /* Int coal Rx2 timer status*/
+#define DCRN_SDR0_ICTSRRX3 0x4427 /* Int coal Rx3 timer status*/
+#endif
+
+#define COAL_FRAME_MASK 0x1FF
+#define MAL_MAX_PHYS_CHANNELS 4
+
struct mal_commac_ops {
+ void (*reset) (void *dev);
void (*poll_tx) (void *dev);
int (*poll_rx) (void *dev, int budget);
int (*peek_rx) (void *dev);
@@ -188,10 +254,22 @@ struct mal_commac {
struct list_head list;
};
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+struct mal_coales_param
+{
+ /* Configuration parameters for the coalescing function */
+ int tx_count;
+ int tx_time;
+ int rx_count;
+ int rx_time;
+};
+#endif
+
struct mal_instance {
int version;
dcr_host_t dcr_host;
+ int desc_memory; /* SDRAM or OCM */
int num_tx_chans; /* Number of TX channels */
int num_rx_chans; /* Number of RX channels */
int txeob_irq; /* TX End Of Buffer IRQ */
@@ -200,6 +278,27 @@ struct mal_instance {
int rxde_irq; /* RX Descriptor Error IRQ */
int serr_irq; /* MAL System Error IRQ */
+#if defined(CONFIG_IBM_NEW_EMAC_INTR_COALESCE)
+
+ int txcoal0_irq; /* COAL */
+ int txcoal1_irq; /* COAL */
+ int txcoal2_irq; /* COAL */
+ int txcoal3_irq; /* COAL */
+ int rxcoal0_irq; /* COAL */
+ int rxcoal1_irq; /* COAL */
+ int rxcoal2_irq; /* COAL */
+ int rxcoal3_irq; /* COAL */
+
+ struct mal_coales_param coales_param[4];
+ /* add copy of iccrtx and iccrrx registers
+ * to bypass the bug on the 440EPX pass1 where these
+ * registers are write only
+ */
+ u32 enet_coales_iccrtx;
+ u32 enet_coales_iccrrx;
+ struct timer_list mal_coal_timer;
+#endif
+
struct list_head poll_list;
struct napi_struct napi;
@@ -208,6 +307,7 @@ struct mal_instance {
u32 rx_chan_mask;
dma_addr_t bd_dma;
+ phys_addr_t bd_phys;
struct mal_descriptor *bd_virt;
struct of_device *ofdev;
@@ -217,6 +317,11 @@ struct mal_instance {
struct net_device dummy_dev;
unsigned int features;
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ int txcoal_irq[MAL_MAX_PHYS_CHANNELS]; /* MAL TxCoalesce Error IRQ */
+ int rxcoal_irq[MAL_MAX_PHYS_CHANNELS]; /* MAL RxCoalesce IRQ */
+ int coalesce_disabled; /* Coalesce disable flag */
+#endif
};
static inline u32 get_mal_dcrn(struct mal_instance *mal, int reg)
@@ -284,6 +389,9 @@ void mal_disable_rx_channel(struct mal_instance *mal, int channel);
void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac);
void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac);
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+void mal_enable_coal(struct mal_instance *mal);
+#endif
/* Add/remove EMAC to/from MAL polling list */
void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac);
diff --git a/drivers/net/ibm_newemac/phy.c b/drivers/net/ibm_newemac/phy.c
index ac9d964e59e..635cb96ef64 100644
--- a/drivers/net/ibm_newemac/phy.c
+++ b/drivers/net/ibm_newemac/phy.c
@@ -52,7 +52,7 @@ int emac_mii_reset_phy(struct mii_phy *phy)
{
int val;
int limit = 10000;
-
+#ifndef CONFIG_APOLLO3G
val = phy_read(phy, MII_BMCR);
val &= ~(BMCR_ISOLATE | BMCR_ANENABLE);
val |= BMCR_RESET;
@@ -68,7 +68,7 @@ int emac_mii_reset_phy(struct mii_phy *phy)
}
if ((val & BMCR_ISOLATE) && limit > 0)
phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE);
-
+#endif /* CONFIG_APOLLO3G */
return limit <= 0;
}
@@ -359,7 +359,51 @@ static struct mii_phy_def bcm5248_phy_def = {
.name = "BCM5248 10/100 SMII Ethernet",
.ops = &generic_phy_ops
};
+#ifdef CONFIG_APOLLO3G
+static int bcm54610_init(struct mii_phy *phy)
+{
+ int regb, rega;
+
+ phy_write(phy, 0x1C, 0x2C00);
+ regb = phy_read(phy, 0x1C);
+
+ phy_write(phy, 0x1C, 0xAC8C);
+
+ phy_write(phy, 0x1C, 0x2C00);
+ rega = phy_read(phy, 0x1C);
+
+ printk(KERN_INFO "%s: before 0x%04x, after 0x%04x\n",
+ __FUNCTION__, (regb & 0xffff), (rega & 0xffff));
+
+ /* the RGMII interface is not half-duplex capable */
+ rega = phy_read(phy, 0x04);
+ phy_write(phy, 0x04, rega & ~0x00a0);
+
+ regb = phy_read(phy, 0x09);
+ phy_write(phy, 0x09, regb & ~0x0100);
+
+ printk(KERN_INFO "%s: before 0x%04x, 0x%04x; after 0x%04x, 0x%04x\n",
+ __FUNCTION__, (rega & 0xffff), (regb & 0xffff),
+ (phy_read(phy, 0x04) & 0xffff), (phy_read(phy, 0x09) & 0xffff));
+
+ return 0;
+}
+
+static struct mii_phy_ops bcm54610_phy_ops = {
+ .init = bcm54610_init,
+ .setup_aneg = genmii_setup_aneg,
+ .setup_forced = genmii_setup_forced,
+ .poll_link = genmii_poll_link,
+ .read_link = genmii_read_link
+};
+static struct mii_phy_def bcm54610_phy_def = {
+ .phy_id = 0x0143BD63,
+ .phy_id_mask = 0xffffffff,
+ .name = "BCM54610 Gigabit Ethernet",
+ .ops = &bcm54610_phy_ops
+};
+#endif
static int m88e1111_init(struct mii_phy *phy)
{
pr_debug("%s: Marvell 88E1111 Ethernet\n", __func__);
@@ -400,6 +444,111 @@ static int m88e1112_init(struct mii_phy *phy)
return 0;
}
+static int m88e1141_init(struct mii_phy *phy)
+{
+ unsigned short data;
+
+ printk(KERN_CRIT "we go to init for %d\n", phy->mode);
+ switch (phy->mode) {
+ case PHY_MODE_GMII:
+#if defined(CONFIG_M88E1141_DEBUG)
+ data = phy_read(phy, 0x00);
+ data |= 0x2000; /* Speed Select 1000Mbps */
+ phy_write(phy, 0x00, data);
+ data = phy_read(phy, 0x14);
+ data |= 0x0010; /* GMII Default MAC interface speed */
+ phy_write(phy, 0x14, data);
+ data = phy_read(phy, 0x1B);
+ data |= 0x8000; /* Auto Selection = Disable */
+ data |= 0x0400; /* Interrupt Polarity = Active Low */
+ data |= 0x0080; /* DTE Detect Status wait time */
+ data |= 0x000F; /* HWCFG_MODE = GMII */
+ phy_write(phy, 0x1B, data);
+ data = phy_read(phy, 0x04);
+ data |= 0x0C00; /* Async Pause + Pause */
+ data |= 0x01E0; /* 100FDX + 100HDX + 10FDX + 10HDX */
+ phy_write(phy, 0x04, data);
+ data = phy_read(phy, 0x09);
+ //data |= 0x1C00; /* Master/Slave Config */
+ data |= 0x0300; /* 1000FDX + 1000HDX */
+ phy_write(phy, 0x09, data);
+#else
+ data = phy_read(phy, 0x14);
+ data |= 0x0010; /* GMII Default MAC interface speed */
+ phy_write(phy, 0x14, data);
+ data = phy_read(phy, 0x1B);
+ data |= 0x000F; /* HWCFG_MODE = GMII */
+ phy_write(phy, 0x1B, data);
+#endif
+ break;
+ case PHY_MODE_RGMII:
+#if defined(CONFIG_M88E1141_DEBUG)
+ data = phy_read(phy, 0x00);
+ data |= 0x2000; /* Speed Select 1000Mbps */
+ phy_write(phy, 0x00, data);
+ data = phy_read(phy, 0x14);
+ data |= 0x0080; /* RGMII RX Timing Control */
+ data |= 0x0002; /* RGMII TX Timing Control */
+ data |= 0x0050; /* RGMII Default MAC interface speed */
+ phy_write(phy, 0x14, data);
+ data = phy_read(phy, 0x1B);
+ data |= 0x8000; /* Auto Selection = Disable */
+ data |= 0x0400; /* Interrupt Polarity = Active Low */
+ data |= 0x0080; /* DTE Detect Status wait time */
+ data |= 0x000B; /* HWCFG_MODE = RGMII */
+ phy_write(phy, 0x1B, data);
+ data = phy_read(phy, 0x04);
+ data |= 0x0C00; /* Async Pause + Pause */
+ data |= 0x01E0; /* 100FDX + 100HDX + 10FDX + 10HDX */
+ phy_write(phy, 0x04, data);
+ data = phy_read(phy, 0x09);
+ //data |= 0x1C00; /* Master/Slave Config */
+ data |= 0x0300; /* 1000FDX + 1000HDX */
+ phy_write(phy, 0x09, data);
+#else
+ data = phy_read(phy, 0x14);
+ data |= 0x0080; /* RGMII RX Timing Control */
+ data |= 0x0002; /* RGMII TX Timing Control */
+ data |= 0x0050; /* RGMII Default MAC interface speed */
+ phy_write(phy, 0x14, data);
+ data = phy_read(phy, 0x1B);
+ data |= 0x000B; /* HWCFG_MODE = RGMII */
+ phy_write(phy, 0x1B, data);
+#endif
+ break;
+ case PHY_MODE_SGMII:
+ data = phy_read(phy, 0x14);
+ data &= ~0x0080; /* CLEAR - RGMII setting */
+ data &= ~0x0002; /* CLEAR - RGMII setting */
+ data &= ~0x0070; /* CLEAR - Default MAC speed */
+ data |= 0x0070; /* GMII Default MAC interface speed */
+ phy_write(phy, 0x14, data);
+
+ data = phy_read(phy, 0x1B);
+ data |= 0x8000; /* Auto Selection = Disable */
+ data &= ~0x0400; /* Interrupt Polarity = Active Low */
+ data |= 0x0120; /* DTE Detect Status wait time */
+ data &= ~0x000F;/* CLEAR - HWCFG_MODE setting */
+ data |= 0x0000; /* HWCFG_MODE = SGMII */
+ phy_write(phy, 0x1B, data);
+
+ phy_write(phy, 0x10, 0x0068);
+ phy_write(phy, 0x16, 0x0001);
+ phy_write(phy, 0x00, 0x8100);
+ phy_write(phy, 0x16, 0x0000);
+ break;
+ }
+
+#if 0
+ data = phy_read(phy, 0x00);
+ data |= 0x8000; /* Reset PHY */
+ phy_write(phy, 0x00, data);
+ udelay(1000);
+#endif
+
+ return 0;
+}
+
static int et1011c_init(struct mii_phy *phy)
{
u16 reg_short;
@@ -467,12 +616,31 @@ static struct mii_phy_def m88e1112_phy_def = {
.ops = &m88e1112_phy_ops,
};
+static struct mii_phy_ops m88e1141_phy_ops = {
+ .init = m88e1141_init,
+ .setup_aneg = genmii_setup_aneg,
+ .setup_forced = genmii_setup_forced,
+ .poll_link = genmii_poll_link,
+ .read_link = genmii_read_link
+};
+
+static struct mii_phy_def m88e1141_phy_def = {
+ .phy_id = 0x01410CD0,
+ .phy_id_mask = 0x0ffffff0,
+ .name = "Marvell 88E1141 Ethernet",
+ .ops = &m88e1141_phy_ops,
+};
+
static struct mii_phy_def *mii_phy_table[] = {
&et1011c_phy_def,
&cis8201_phy_def,
&bcm5248_phy_def,
+#ifdef CONFIG_APOLLO3G
+ &bcm54610_phy_def,
+#endif
&m88e1111_phy_def,
&m88e1112_phy_def,
+ &m88e1141_phy_def,
&genmii_phy_def,
NULL
};
@@ -487,7 +655,11 @@ int emac_mii_phy_probe(struct mii_phy *phy, int address)
phy->advertising = 0;
phy->address = address;
phy->speed = SPEED_10;
+#ifndef CONFIG_APOLLO3G
phy->duplex = DUPLEX_HALF;
+#else
+ phy->duplex = DUPLEX_FULL;
+#endif
phy->pause = phy->asym_pause = 0;
/* Take PHY out of isolate mode and reset it. */
@@ -511,24 +683,36 @@ int emac_mii_phy_probe(struct mii_phy *phy, int address)
u16 bmsr = phy_read(phy, MII_BMSR);
if (bmsr & BMSR_ANEGCAPABLE)
phy->features |= SUPPORTED_Autoneg;
+#ifndef CONFIG_APOLLO3G
if (bmsr & BMSR_10HALF)
phy->features |= SUPPORTED_10baseT_Half;
+#endif
if (bmsr & BMSR_10FULL)
phy->features |= SUPPORTED_10baseT_Full;
+#ifndef CONFIG_APOLLO3G
if (bmsr & BMSR_100HALF)
phy->features |= SUPPORTED_100baseT_Half;
+#endif
if (bmsr & BMSR_100FULL)
phy->features |= SUPPORTED_100baseT_Full;
if (bmsr & BMSR_ESTATEN) {
u16 esr = phy_read(phy, MII_ESTATUS);
if (esr & ESTATUS_1000_TFULL)
phy->features |= SUPPORTED_1000baseT_Full;
+#ifndef CONFIG_APOLLO3G
if (esr & ESTATUS_1000_THALF)
phy->features |= SUPPORTED_1000baseT_Half;
+#endif
}
phy->features |= SUPPORTED_MII;
}
+#if (defined CONFIG_APM82181) /* RGMII does not support half-duplex */
+ phy->features &= ~(SUPPORTED_1000baseT_Half |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_10baseT_Half);
+#endif
+
/* Setup default advertising */
phy->advertising = phy->features;
diff --git a/drivers/net/ibm_newemac/rgmii.c b/drivers/net/ibm_newemac/rgmii.c
index 8d76cb89dbd..98048d8b667 100644
--- a/drivers/net/ibm_newemac/rgmii.c
+++ b/drivers/net/ibm_newemac/rgmii.c
@@ -222,7 +222,7 @@ void *rgmii_dump_regs(struct of_device *ofdev, void *buf)
* rgmii ? if yes, then we'll add a cell_index
* like we do for emac
*/
- memcpy_fromio(regs, dev->base, sizeof(struct rgmii_regs));
+ memcpy(regs, dev->base, sizeof(struct rgmii_regs));
return regs + 1;
}
diff --git a/drivers/net/ibm_newemac/tah.c b/drivers/net/ibm_newemac/tah.c
index 30173a9fb55..8d31b4a2c91 100644
--- a/drivers/net/ibm_newemac/tah.c
+++ b/drivers/net/ibm_newemac/tah.c
@@ -61,7 +61,7 @@ void tah_reset(struct of_device *ofdev)
/* 10KB TAH TX FIFO accomodates the max MTU of 9000 */
out_be32(&p->mr,
- TAH_MR_CVR | TAH_MR_ST_768 | TAH_MR_TFS_10KB | TAH_MR_DTFP |
+ TAH_MR_CVR | TAH_MR_ST_256 | TAH_MR_TFS_10KB | TAH_MR_DTFP |
TAH_MR_DIG);
}
@@ -82,7 +82,7 @@ void *tah_dump_regs(struct of_device *ofdev, void *buf)
* zmii ? if yes, then we'll add a cell_index
* like we do for emac
*/
- memcpy_fromio(regs, dev->base, sizeof(struct tah_regs));
+ memcpy(regs, dev->base, sizeof(struct tah_regs));
return regs + 1;
}
diff --git a/drivers/net/ibm_newemac/zmii.c b/drivers/net/ibm_newemac/zmii.c
index 17b15412494..edb710c4a35 100644
--- a/drivers/net/ibm_newemac/zmii.c
+++ b/drivers/net/ibm_newemac/zmii.c
@@ -225,7 +225,7 @@ void *zmii_dump_regs(struct of_device *ofdev, void *buf)
* zmii ? if yes, then we'll add a cell_index
* like we do for emac
*/
- memcpy_fromio(regs, dev->base, sizeof(struct zmii_regs));
+ memcpy(regs, dev->base, sizeof(struct zmii_regs));
return regs + 1;
}