diff options
Diffstat (limited to 'drivers/net/ethernet/freescale/gianfar.c')
| -rw-r--r-- | drivers/net/ethernet/freescale/gianfar.c | 1668 | 
1 files changed, 859 insertions, 809 deletions
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index c4eaadeb572..a6cf40e62f3 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -9,7 +9,7 @@   * Maintainer: Kumar Gala   * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>   * - * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc. + * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.   * Copyright 2007 MontaVista Software, Inc.   *   * This program is free software; you can redistribute  it and/or modify it @@ -70,7 +70,6 @@  #include <linux/unistd.h>  #include <linux/slab.h>  #include <linux/interrupt.h> -#include <linux/init.h>  #include <linux/delay.h>  #include <linux/netdevice.h>  #include <linux/etherdevice.h> @@ -78,6 +77,8 @@  #include <linux/if_vlan.h>  #include <linux/spinlock.h>  #include <linux/mm.h> +#include <linux/of_address.h> +#include <linux/of_irq.h>  #include <linux/of_mdio.h>  #include <linux/of_platform.h>  #include <linux/ip.h> @@ -88,6 +89,7 @@  #include <asm/io.h>  #include <asm/reg.h> +#include <asm/mpc85xx.h>  #include <asm/irq.h>  #include <asm/uaccess.h>  #include <linux/module.h> @@ -119,7 +121,7 @@ static irqreturn_t gfar_error(int irq, void *dev_id);  static irqreturn_t gfar_transmit(int irq, void *dev_id);  static irqreturn_t gfar_interrupt(int irq, void *dev_id);  static void adjust_link(struct net_device *dev); -static void init_registers(struct net_device *dev); +static noinline void gfar_update_link_state(struct gfar_private *priv);  static int init_phy(struct net_device *dev);  static int gfar_probe(struct platform_device *ofdev);  static int gfar_remove(struct platform_device *ofdev); @@ -127,8 +129,10 @@ static void free_skb_resources(struct gfar_private *priv);  static void gfar_set_multi(struct net_device *dev);  static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);  static void gfar_configure_serdes(struct net_device *dev); -static 
int gfar_poll(struct napi_struct *napi, int budget); -static int gfar_poll_sq(struct napi_struct *napi, int budget); +static int gfar_poll_rx(struct napi_struct *napi, int budget); +static int gfar_poll_tx(struct napi_struct *napi, int budget); +static int gfar_poll_rx_sq(struct napi_struct *napi, int budget); +static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);  #ifdef CONFIG_NET_POLL_CONTROLLER  static void gfar_netpoll(struct net_device *dev);  #endif @@ -136,9 +140,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);  static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);  static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,  			       int amount_pull, struct napi_struct *napi); -void gfar_halt(struct net_device *dev); -static void gfar_halt_nodisable(struct net_device *dev); -void gfar_start(struct net_device *dev); +static void gfar_halt_nodisable(struct gfar_private *priv);  static void gfar_clear_exact_match(struct net_device *dev);  static void gfar_set_mac_for_addr(struct net_device *dev, int num,  				  const u8 *addr); @@ -330,72 +332,76 @@ static void gfar_init_tx_rx_base(struct gfar_private *priv)  	}  } -static void gfar_init_mac(struct net_device *ndev) +static void gfar_rx_buff_size_config(struct gfar_private *priv)  { -	struct gfar_private *priv = netdev_priv(ndev); -	struct gfar __iomem *regs = priv->gfargrp[0].regs; -	u32 rctrl = 0; -	u32 tctrl = 0; -	u32 attrs = 0; - -	/* write the tx/rx base registers */ -	gfar_init_tx_rx_base(priv); - -	/* Configure the coalescing support */ -	gfar_configure_coalescing_all(priv); +	int frame_size = priv->ndev->mtu + ETH_HLEN;  	/* set this when rx hw offload (TOE) functions are being used */  	priv->uses_rxfcb = 0; +	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) +		priv->uses_rxfcb = 1; + +	if (priv->hwts_rx_en) +		priv->uses_rxfcb = 1; + +	if (priv->uses_rxfcb) +		frame_size += GMAC_FCB_LEN; + +	frame_size += 
priv->padding; + +	frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + +		     INCREMENTAL_BUFFER_SIZE; + +	priv->rx_buffer_size = frame_size; +} + +static void gfar_mac_rx_config(struct gfar_private *priv) +{ +	struct gfar __iomem *regs = priv->gfargrp[0].regs; +	u32 rctrl = 0; +  	if (priv->rx_filer_enable) {  		rctrl |= RCTRL_FILREN;  		/* Program the RIR0 reg with the required distribution */ -		gfar_write(®s->rir0, DEFAULT_RIR0); +		if (priv->poll_mode == GFAR_SQ_POLLING) +			gfar_write(®s->rir0, DEFAULT_2RXQ_RIR0); +		else /* GFAR_MQ_POLLING */ +			gfar_write(®s->rir0, DEFAULT_8RXQ_RIR0);  	}  	/* Restore PROMISC mode */ -	if (ndev->flags & IFF_PROMISC) +	if (priv->ndev->flags & IFF_PROMISC)  		rctrl |= RCTRL_PROM; -	if (ndev->features & NETIF_F_RXCSUM) { +	if (priv->ndev->features & NETIF_F_RXCSUM)  		rctrl |= RCTRL_CHECKSUMMING; -		priv->uses_rxfcb = 1; -	} - -	if (priv->extended_hash) { -		rctrl |= RCTRL_EXTHASH; -		gfar_clear_exact_match(ndev); -		rctrl |= RCTRL_EMEN; -	} +	if (priv->extended_hash) +		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;  	if (priv->padding) {  		rctrl &= ~RCTRL_PAL_MASK;  		rctrl |= RCTRL_PADDING(priv->padding);  	} -	/* Insert receive time stamps into padding alignment bytes */ -	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) { -		rctrl &= ~RCTRL_PAL_MASK; -		rctrl |= RCTRL_PADDING(8); -		priv->padding = 8; -	} -  	/* Enable HW time stamping if requested from user space */ -	if (priv->hwts_rx_en) { +	if (priv->hwts_rx_en)  		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE; -		priv->uses_rxfcb = 1; -	} -	if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) { +	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)  		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; -		priv->uses_rxfcb = 1; -	}  	/* Init rctrl based on our settings */  	gfar_write(®s->rctrl, rctrl); +} -	if (ndev->features & NETIF_F_IP_CSUM) +static void gfar_mac_tx_config(struct gfar_private *priv) +{ +	struct gfar __iomem *regs = priv->gfargrp[0].regs; +	u32 tctrl = 0; + +	if 
(priv->ndev->features & NETIF_F_IP_CSUM)  		tctrl |= TCTRL_INIT_CSUM;  	if (priv->prio_sched_en) @@ -406,30 +412,51 @@ static void gfar_init_mac(struct net_device *ndev)  		gfar_write(®s->tr47wt, DEFAULT_WRRS_WEIGHT);  	} -	gfar_write(®s->tctrl, tctrl); +	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) +		tctrl |= TCTRL_VLINS; -	/* Set the extraction length and index */ -	attrs = ATTRELI_EL(priv->rx_stash_size) | -		ATTRELI_EI(priv->rx_stash_index); +	gfar_write(®s->tctrl, tctrl); +} -	gfar_write(®s->attreli, attrs); +static void gfar_configure_coalescing(struct gfar_private *priv, +			       unsigned long tx_mask, unsigned long rx_mask) +{ +	struct gfar __iomem *regs = priv->gfargrp[0].regs; +	u32 __iomem *baddr; -	/* Start with defaults, and add stashing or locking -	 * depending on the approprate variables -	 */ -	attrs = ATTR_INIT_SETTINGS; +	if (priv->mode == MQ_MG_MODE) { +		int i = 0; -	if (priv->bd_stash_en) -		attrs |= ATTR_BDSTASH; +		baddr = ®s->txic0; +		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) { +			gfar_write(baddr + i, 0); +			if (likely(priv->tx_queue[i]->txcoalescing)) +				gfar_write(baddr + i, priv->tx_queue[i]->txic); +		} -	if (priv->rx_stash_size != 0) -		attrs |= ATTR_BUFSTASH; +		baddr = ®s->rxic0; +		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) { +			gfar_write(baddr + i, 0); +			if (likely(priv->rx_queue[i]->rxcoalescing)) +				gfar_write(baddr + i, priv->rx_queue[i]->rxic); +		} +	} else { +		/* Backward compatible case -- even if we enable +		 * multiple queues, there's only single reg to program +		 */ +		gfar_write(®s->txic, 0); +		if (likely(priv->tx_queue[0]->txcoalescing)) +			gfar_write(®s->txic, priv->tx_queue[0]->txic); -	gfar_write(®s->attr, attrs); +		gfar_write(®s->rxic, 0); +		if (unlikely(priv->rx_queue[0]->rxcoalescing)) +			gfar_write(®s->rxic, priv->rx_queue[0]->rxic); +	} +} -	gfar_write(®s->fifo_tx_thr, priv->fifo_threshold); -	gfar_write(®s->fifo_tx_starve, priv->fifo_starve); -	
gfar_write(®s->fifo_tx_starve_shutoff, priv->fifo_starve_off); +void gfar_configure_coalescing_all(struct gfar_private *priv) +{ +	gfar_configure_coalescing(priv, 0xFF, 0xFF);  }  static struct net_device_stats *gfar_get_stats(struct net_device *dev) @@ -477,12 +504,27 @@ static const struct net_device_ops gfar_netdev_ops = {  #endif  }; -void lock_rx_qs(struct gfar_private *priv) +static void gfar_ints_disable(struct gfar_private *priv)  {  	int i; +	for (i = 0; i < priv->num_grps; i++) { +		struct gfar __iomem *regs = priv->gfargrp[i].regs; +		/* Clear IEVENT */ +		gfar_write(®s->ievent, IEVENT_INIT_CLEAR); -	for (i = 0; i < priv->num_rx_queues; i++) -		spin_lock(&priv->rx_queue[i]->rxlock); +		/* Initialize IMASK */ +		gfar_write(®s->imask, IMASK_INIT_CLEAR); +	} +} + +static void gfar_ints_enable(struct gfar_private *priv) +{ +	int i; +	for (i = 0; i < priv->num_grps; i++) { +		struct gfar __iomem *regs = priv->gfargrp[i].regs; +		/* Unmask the interrupts we look for */ +		gfar_write(®s->imask, IMASK_DEFAULT); +	}  }  void lock_tx_qs(struct gfar_private *priv) @@ -493,23 +535,50 @@ void lock_tx_qs(struct gfar_private *priv)  		spin_lock(&priv->tx_queue[i]->txlock);  } -void unlock_rx_qs(struct gfar_private *priv) +void unlock_tx_qs(struct gfar_private *priv)  {  	int i; -	for (i = 0; i < priv->num_rx_queues; i++) -		spin_unlock(&priv->rx_queue[i]->rxlock); +	for (i = 0; i < priv->num_tx_queues; i++) +		spin_unlock(&priv->tx_queue[i]->txlock);  } -void unlock_tx_qs(struct gfar_private *priv) +static int gfar_alloc_tx_queues(struct gfar_private *priv)  {  	int i; -	for (i = 0; i < priv->num_tx_queues; i++) -		spin_unlock(&priv->tx_queue[i]->txlock); +	for (i = 0; i < priv->num_tx_queues; i++) { +		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q), +					    GFP_KERNEL); +		if (!priv->tx_queue[i]) +			return -ENOMEM; + +		priv->tx_queue[i]->tx_skbuff = NULL; +		priv->tx_queue[i]->qindex = i; +		priv->tx_queue[i]->dev = priv->ndev; +		
spin_lock_init(&(priv->tx_queue[i]->txlock)); +	} +	return 0;  } -static void free_tx_pointers(struct gfar_private *priv) +static int gfar_alloc_rx_queues(struct gfar_private *priv) +{ +	int i; + +	for (i = 0; i < priv->num_rx_queues; i++) { +		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q), +					    GFP_KERNEL); +		if (!priv->rx_queue[i]) +			return -ENOMEM; + +		priv->rx_queue[i]->rx_skbuff = NULL; +		priv->rx_queue[i]->qindex = i; +		priv->rx_queue[i]->dev = priv->ndev; +	} +	return 0; +} + +static void gfar_free_tx_queues(struct gfar_private *priv)  {  	int i; @@ -517,7 +586,7 @@ static void free_tx_pointers(struct gfar_private *priv)  		kfree(priv->tx_queue[i]);  } -static void free_rx_pointers(struct gfar_private *priv) +static void gfar_free_rx_queues(struct gfar_private *priv)  {  	int i; @@ -551,23 +620,26 @@ static void disable_napi(struct gfar_private *priv)  {  	int i; -	for (i = 0; i < priv->num_grps; i++) -		napi_disable(&priv->gfargrp[i].napi); +	for (i = 0; i < priv->num_grps; i++) { +		napi_disable(&priv->gfargrp[i].napi_rx); +		napi_disable(&priv->gfargrp[i].napi_tx); +	}  }  static void enable_napi(struct gfar_private *priv)  {  	int i; -	for (i = 0; i < priv->num_grps; i++) -		napi_enable(&priv->gfargrp[i].napi); +	for (i = 0; i < priv->num_grps; i++) { +		napi_enable(&priv->gfargrp[i].napi_rx); +		napi_enable(&priv->gfargrp[i].napi_tx); +	}  }  static int gfar_parse_group(struct device_node *np,  			    struct gfar_private *priv, const char *model)  {  	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps]; -	u32 *queue_mask;  	int i;  	for (i = 0; i < GFAR_NUM_IRQS; i++) { @@ -596,16 +668,52 @@ static int gfar_parse_group(struct device_node *np,  	grp->priv = priv;  	spin_lock_init(&grp->grplock);  	if (priv->mode == MQ_MG_MODE) { -		queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL); -		grp->rx_bit_map = queue_mask ? 
-			*queue_mask : (DEFAULT_MAPPING >> priv->num_grps); -		queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL); -		grp->tx_bit_map = queue_mask ? -			*queue_mask : (DEFAULT_MAPPING >> priv->num_grps); +		u32 *rxq_mask, *txq_mask; +		rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL); +		txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL); + +		if (priv->poll_mode == GFAR_SQ_POLLING) { +			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */ +			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); +			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); +		} else { /* GFAR_MQ_POLLING */ +			grp->rx_bit_map = rxq_mask ? +			*rxq_mask : (DEFAULT_MAPPING >> priv->num_grps); +			grp->tx_bit_map = txq_mask ? +			*txq_mask : (DEFAULT_MAPPING >> priv->num_grps); +		}  	} else {  		grp->rx_bit_map = 0xFF;  		grp->tx_bit_map = 0xFF;  	} + +	/* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses +	 * right to left, so we need to revert the 8 bits to get the q index +	 */ +	grp->rx_bit_map = bitrev8(grp->rx_bit_map); +	grp->tx_bit_map = bitrev8(grp->tx_bit_map); + +	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, +	 * also assign queues to groups +	 */ +	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) { +		if (!grp->rx_queue) +			grp->rx_queue = priv->rx_queue[i]; +		grp->num_rx_queues++; +		grp->rstat |= (RSTAT_CLEAR_RHALT >> i); +		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i); +		priv->rx_queue[i]->grp = grp; +	} + +	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) { +		if (!grp->tx_queue) +			grp->tx_queue = priv->tx_queue[i]; +		grp->num_tx_queues++; +		grp->tstat |= (TSTAT_CLEAR_THALT >> i); +		priv->tqueue |= (TQUEUE_EN0 >> i); +		priv->tx_queue[i]->grp = grp; +	} +  	priv->num_grps++;  	return 0; @@ -626,13 +734,45 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)  	const u32 *stash_idx;  	unsigned int num_tx_qs, num_rx_qs;  	u32 *tx_queues, *rx_queues; +	
unsigned short mode, poll_mode;  	if (!np || !of_device_is_available(np))  		return -ENODEV; -	/* parse the num of tx and rx queues */ +	if (of_device_is_compatible(np, "fsl,etsec2")) { +		mode = MQ_MG_MODE; +		poll_mode = GFAR_SQ_POLLING; +	} else { +		mode = SQ_SG_MODE; +		poll_mode = GFAR_SQ_POLLING; +	} + +	/* parse the num of HW tx and rx queues */  	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL); -	num_tx_qs = tx_queues ? *tx_queues : 1; +	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL); + +	if (mode == SQ_SG_MODE) { +		num_tx_qs = 1; +		num_rx_qs = 1; +	} else { /* MQ_MG_MODE */ +		/* get the actual number of supported groups */ +		unsigned int num_grps = of_get_available_child_count(np); + +		if (num_grps == 0 || num_grps > MAXGROUPS) { +			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n", +				num_grps); +			pr_err("Cannot do alloc_etherdev, aborting\n"); +			return -EINVAL; +		} + +		if (poll_mode == GFAR_SQ_POLLING) { +			num_tx_qs = num_grps; /* one txq per int group */ +			num_rx_qs = num_grps; /* one rxq per int group */ +		} else { /* GFAR_MQ_POLLING */ +			num_tx_qs = tx_queues ? *tx_queues : 1; +			num_rx_qs = rx_queues ? *rx_queues : 1; +		} +	}  	if (num_tx_qs > MAX_TX_QS) {  		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n", @@ -641,9 +781,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)  		return -EINVAL;  	} -	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL); -	num_rx_qs = rx_queues ? 
*rx_queues : 1; -  	if (num_rx_qs > MAX_RX_QS) {  		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",  		       num_rx_qs, MAX_RX_QS); @@ -659,10 +796,20 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)  	priv = netdev_priv(dev);  	priv->ndev = dev; +	priv->mode = mode; +	priv->poll_mode = poll_mode; +  	priv->num_tx_queues = num_tx_qs;  	netif_set_real_num_rx_queues(dev, num_rx_qs);  	priv->num_rx_queues = num_rx_qs; -	priv->num_grps = 0x0; + +	err = gfar_alloc_tx_queues(priv); +	if (err) +		goto tx_alloc_failed; + +	err = gfar_alloc_rx_queues(priv); +	if (err) +		goto rx_alloc_failed;  	/* Init Rx queue filer rule set linked list */  	INIT_LIST_HEAD(&priv->rx_list.list); @@ -675,52 +822,18 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)  		priv->gfargrp[i].regs = NULL;  	/* Parse and initialize group specific information */ -	if (of_device_is_compatible(np, "fsl,etsec2")) { -		priv->mode = MQ_MG_MODE; +	if (priv->mode == MQ_MG_MODE) {  		for_each_child_of_node(np, child) {  			err = gfar_parse_group(child, priv, model);  			if (err)  				goto err_grp_init;  		} -	} else { -		priv->mode = SQ_SG_MODE; +	} else { /* SQ_SG_MODE */  		err = gfar_parse_group(np, priv, model);  		if (err)  			goto err_grp_init;  	} -	for (i = 0; i < priv->num_tx_queues; i++) -		priv->tx_queue[i] = NULL; -	for (i = 0; i < priv->num_rx_queues; i++) -		priv->rx_queue[i] = NULL; - -	for (i = 0; i < priv->num_tx_queues; i++) { -		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q), -					    GFP_KERNEL); -		if (!priv->tx_queue[i]) { -			err = -ENOMEM; -			goto tx_alloc_failed; -		} -		priv->tx_queue[i]->tx_skbuff = NULL; -		priv->tx_queue[i]->qindex = i; -		priv->tx_queue[i]->dev = dev; -		spin_lock_init(&(priv->tx_queue[i]->txlock)); -	} - -	for (i = 0; i < priv->num_rx_queues; i++) { -		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q), -					    GFP_KERNEL); -		if (!priv->rx_queue[i]) { -			err = 
-ENOMEM; -			goto rx_alloc_failed; -		} -		priv->rx_queue[i]->rx_skbuff = NULL; -		priv->rx_queue[i]->qindex = i; -		priv->rx_queue[i]->dev = dev; -		spin_lock_init(&(priv->rx_queue[i]->rxlock)); -	} - -  	stash = of_get_property(np, "bd-stash", NULL);  	if (stash) { @@ -747,17 +860,16 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)  		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);  	if (model && !strcasecmp(model, "TSEC")) -		priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT | +		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |  				     FSL_GIANFAR_DEV_HAS_COALESCE |  				     FSL_GIANFAR_DEV_HAS_RMON |  				     FSL_GIANFAR_DEV_HAS_MULTI_INTR;  	if (model && !strcasecmp(model, "eTSEC")) -		priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT | +		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |  				     FSL_GIANFAR_DEV_HAS_COALESCE |  				     FSL_GIANFAR_DEV_HAS_RMON |  				     FSL_GIANFAR_DEV_HAS_MULTI_INTR | -				     FSL_GIANFAR_DEV_HAS_PADDING |  				     FSL_GIANFAR_DEV_HAS_CSUM |  				     FSL_GIANFAR_DEV_HAS_VLAN |  				     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | @@ -777,23 +889,33 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)  	priv->phy_node = of_parse_phandle(np, "phy-handle", 0); +	/* In the case of a fixed PHY, the DT node associated +	 * to the PHY is the Ethernet MAC DT node. +	 */ +	if (of_phy_is_fixed_link(np)) { +		err = of_phy_register_fixed_link(np); +		if (err) +			goto err_grp_init; + +		priv->phy_node = np; +	} +  	/* Find the TBI PHY.  
If it's not there, we don't support SGMII */  	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);  	return 0; -rx_alloc_failed: -	free_rx_pointers(priv); -tx_alloc_failed: -	free_tx_pointers(priv);  err_grp_init:  	unmap_group_regs(priv); +rx_alloc_failed: +	gfar_free_rx_queues(priv); +tx_alloc_failed: +	gfar_free_tx_queues(priv);  	free_gfar_dev(priv);  	return err;  } -static int gfar_hwtstamp_ioctl(struct net_device *netdev, -			       struct ifreq *ifr, int cmd) +static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)  {  	struct hwtstamp_config config;  	struct gfar_private *priv = netdev_priv(netdev); @@ -821,18 +943,16 @@ static int gfar_hwtstamp_ioctl(struct net_device *netdev,  	switch (config.rx_filter) {  	case HWTSTAMP_FILTER_NONE:  		if (priv->hwts_rx_en) { -			stop_gfar(netdev);  			priv->hwts_rx_en = 0; -			startup_gfar(netdev); +			reset_gfar(netdev);  		}  		break;  	default:  		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))  			return -ERANGE;  		if (!priv->hwts_rx_en) { -			stop_gfar(netdev);  			priv->hwts_rx_en = 1; -			startup_gfar(netdev); +			reset_gfar(netdev);  		}  		config.rx_filter = HWTSTAMP_FILTER_ALL;  		break; @@ -842,7 +962,20 @@ static int gfar_hwtstamp_ioctl(struct net_device *netdev,  		-EFAULT : 0;  } -/* Ioctl MII Interface */ +static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) +{ +	struct hwtstamp_config config; +	struct gfar_private *priv = netdev_priv(netdev); + +	config.flags = 0; +	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; +	config.rx_filter = (priv->hwts_rx_en ? +			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE); + +	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
+		-EFAULT : 0; +} +  static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)  {  	struct gfar_private *priv = netdev_priv(dev); @@ -851,7 +984,9 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)  		return -EINVAL;  	if (cmd == SIOCSHWTSTAMP) -		return gfar_hwtstamp_ioctl(dev, rq, cmd); +		return gfar_hwtstamp_set(dev, rq); +	if (cmd == SIOCGHWTSTAMP) +		return gfar_hwtstamp_get(dev, rq);  	if (!priv->phydev)  		return -ENODEV; @@ -859,19 +994,6 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)  	return phy_mii_ioctl(priv->phydev, rq, cmd);  } -static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs) -{ -	unsigned int new_bit_map = 0x0; -	int mask = 0x1 << (max_qs - 1), i; - -	for (i = 0; i < max_qs; i++) { -		if (bit_map & mask) -			new_bit_map = new_bit_map + (1 << i); -		mask = mask >> 0x1; -	} -	return new_bit_map; -} -  static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,  				   u32 class)  { @@ -939,9 +1061,8 @@ static void gfar_init_filer_table(struct gfar_private *priv)  	}  } -static void gfar_detect_errata(struct gfar_private *priv) +static void __gfar_detect_errata_83xx(struct gfar_private *priv)  { -	struct device *dev = &priv->ofdev->dev;  	unsigned int pvr = mfspr(SPRN_PVR);  	unsigned int svr = mfspr(SPRN_SVR);  	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */ @@ -957,114 +1078,173 @@ static void gfar_detect_errata(struct gfar_private *priv)  	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))  		priv->errata |= GFAR_ERRATA_76; -	/* MPC8313 and MPC837x all rev */ -	if ((pvr == 0x80850010 && mod == 0x80b0) || -	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) -		priv->errata |= GFAR_ERRATA_A002; - -	/* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */ -	if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) || -	    (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020)) +	/* MPC8313 Rev < 2.0 */ +	if (pvr == 0x80850010 && mod == 
0x80b0 && rev < 0x0020)  		priv->errata |= GFAR_ERRATA_12; - -	if (priv->errata) -		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", -			 priv->errata);  } -/* Set up the ethernet device structure, private data, - * and anything else we need before we start - */ -static int gfar_probe(struct platform_device *ofdev) +static void __gfar_detect_errata_85xx(struct gfar_private *priv)  { -	u32 tempval; -	struct net_device *dev = NULL; -	struct gfar_private *priv = NULL; -	struct gfar __iomem *regs = NULL; -	int err = 0, i, grp_idx = 0; -	u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0; -	u32 isrg = 0; -	u32 __iomem *baddr; - -	err = gfar_of_init(ofdev, &dev); +	unsigned int svr = mfspr(SPRN_SVR); -	if (err) -		return err; +	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20)) +		priv->errata |= GFAR_ERRATA_12; +	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) || +	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20))) +		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */ +} -	priv = netdev_priv(dev); -	priv->ndev = dev; -	priv->ofdev = ofdev; -	priv->dev = &ofdev->dev; -	SET_NETDEV_DEV(dev, &ofdev->dev); +static void gfar_detect_errata(struct gfar_private *priv) +{ +	struct device *dev = &priv->ofdev->dev; -	spin_lock_init(&priv->bflock); -	INIT_WORK(&priv->reset_task, gfar_reset_task); +	/* no plans to fix */ +	priv->errata |= GFAR_ERRATA_A002; -	platform_set_drvdata(ofdev, priv); -	regs = priv->gfargrp[0].regs; +	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2)) +		__gfar_detect_errata_85xx(priv); +	else /* non-mpc85xx parts, i.e. e300 core based */ +		__gfar_detect_errata_83xx(priv); -	gfar_detect_errata(priv); +	if (priv->errata) +		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", +			 priv->errata); +} -	/* Stop the DMA engine now, in case it was running before -	 * (The firmware could have used it, and left it running). 
-	 */ -	gfar_halt(dev); +void gfar_mac_reset(struct gfar_private *priv) +{ +	struct gfar __iomem *regs = priv->gfargrp[0].regs; +	u32 tempval;  	/* Reset MAC layer */  	gfar_write(®s->maccfg1, MACCFG1_SOFT_RESET);  	/* We need to delay at least 3 TX clocks */ -	udelay(2); +	udelay(3); -	tempval = 0; -	if (!priv->pause_aneg_en && priv->tx_pause_en) -		tempval |= MACCFG1_TX_FLOW; -	if (!priv->pause_aneg_en && priv->rx_pause_en) -		tempval |= MACCFG1_RX_FLOW;  	/* the soft reset bit is not self-resetting, so we need to  	 * clear it before resuming normal operation  	 */ -	gfar_write(®s->maccfg1, tempval); +	gfar_write(®s->maccfg1, 0); + +	udelay(3); + +	/* Compute rx_buff_size based on config flags */ +	gfar_rx_buff_size_config(priv); + +	/* Initialize the max receive frame/buffer lengths */ +	gfar_write(®s->maxfrm, priv->rx_buffer_size); +	gfar_write(®s->mrblr, priv->rx_buffer_size); + +	/* Initialize the Minimum Frame Length Register */ +	gfar_write(®s->minflr, MINFLR_INIT_SETTINGS);  	/* Initialize MACCFG2. 
*/  	tempval = MACCFG2_INIT_SETTINGS; -	if (gfar_has_errata(priv, GFAR_ERRATA_74)) + +	/* If the mtu is larger than the max size for standard +	 * ethernet frames (ie, a jumbo frame), then set maccfg2 +	 * to allow huge frames, and to check the length +	 */ +	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE || +	    gfar_has_errata(priv, GFAR_ERRATA_74))  		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK; +  	gfar_write(®s->maccfg2, tempval); +	/* Clear mac addr hash registers */ +	gfar_write(®s->igaddr0, 0); +	gfar_write(®s->igaddr1, 0); +	gfar_write(®s->igaddr2, 0); +	gfar_write(®s->igaddr3, 0); +	gfar_write(®s->igaddr4, 0); +	gfar_write(®s->igaddr5, 0); +	gfar_write(®s->igaddr6, 0); +	gfar_write(®s->igaddr7, 0); + +	gfar_write(®s->gaddr0, 0); +	gfar_write(®s->gaddr1, 0); +	gfar_write(®s->gaddr2, 0); +	gfar_write(®s->gaddr3, 0); +	gfar_write(®s->gaddr4, 0); +	gfar_write(®s->gaddr5, 0); +	gfar_write(®s->gaddr6, 0); +	gfar_write(®s->gaddr7, 0); + +	if (priv->extended_hash) +		gfar_clear_exact_match(priv->ndev); + +	gfar_mac_rx_config(priv); + +	gfar_mac_tx_config(priv); + +	gfar_set_mac_address(priv->ndev); + +	gfar_set_multi(priv->ndev); + +	/* clear ievent and imask before configuring coalescing */ +	gfar_ints_disable(priv); + +	/* Configure the coalescing support */ +	gfar_configure_coalescing_all(priv); +} + +static void gfar_hw_init(struct gfar_private *priv) +{ +	struct gfar __iomem *regs = priv->gfargrp[0].regs; +	u32 attrs; + +	/* Stop the DMA engine now, in case it was running before +	 * (The firmware could have used it, and left it running). 
+	 */ +	gfar_halt(priv); + +	gfar_mac_reset(priv); + +	/* Zero out the rmon mib registers if it has them */ +	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { +		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib)); + +		/* Mask off the CAM interrupts */ +		gfar_write(®s->rmon.cam1, 0xffffffff); +		gfar_write(®s->rmon.cam2, 0xffffffff); +	} +  	/* Initialize ECNTRL */  	gfar_write(®s->ecntrl, ECNTRL_INIT_SETTINGS); -	/* Set the dev->base_addr to the gfar reg region */ -	dev->base_addr = (unsigned long) regs; +	/* Set the extraction length and index */ +	attrs = ATTRELI_EL(priv->rx_stash_size) | +		ATTRELI_EI(priv->rx_stash_index); -	/* Fill in the dev structure */ -	dev->watchdog_timeo = TX_TIMEOUT; -	dev->mtu = 1500; -	dev->netdev_ops = &gfar_netdev_ops; -	dev->ethtool_ops = &gfar_ethtool_ops; +	gfar_write(®s->attreli, attrs); -	/* Register for napi ...We are registering NAPI for each grp */ -	if (priv->mode == SQ_SG_MODE) -		netif_napi_add(dev, &priv->gfargrp[0].napi, gfar_poll_sq, -			       GFAR_DEV_WEIGHT); -	else -		for (i = 0; i < priv->num_grps; i++) -			netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, -				       GFAR_DEV_WEIGHT); +	/* Start with defaults, and add stashing +	 * depending on driver parameters +	 */ +	attrs = ATTR_INIT_SETTINGS; -	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { -		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | -				   NETIF_F_RXCSUM; -		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | -				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA; -	} +	if (priv->bd_stash_en) +		attrs |= ATTR_BDSTASH; -	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { -		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | -				    NETIF_F_HW_VLAN_CTAG_RX; -		dev->features |= NETIF_F_HW_VLAN_CTAG_RX; -	} +	if (priv->rx_stash_size != 0) +		attrs |= ATTR_BUFSTASH; + +	gfar_write(®s->attr, attrs); + +	/* FIFO configs */ +	gfar_write(®s->fifo_tx_thr, DEFAULT_FIFO_TX_THR); +	gfar_write(®s->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE); +	
gfar_write(®s->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF); + +	/* Program the interrupt steering regs, only for MG devices */ +	if (priv->num_grps > 1) +		gfar_write_isrg(priv); +} + +static void gfar_init_addr_hash_table(struct gfar_private *priv) +{ +	struct gfar __iomem *regs = priv->gfargrp[0].regs;  	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {  		priv->extended_hash = 1; @@ -1100,68 +1280,81 @@ static int gfar_probe(struct platform_device *ofdev)  		priv->hash_regs[6] = ®s->gaddr6;  		priv->hash_regs[7] = ®s->gaddr7;  	} +} -	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING) -		priv->padding = DEFAULT_PADDING; -	else -		priv->padding = 0; +/* Set up the ethernet device structure, private data, + * and anything else we need before we start + */ +static int gfar_probe(struct platform_device *ofdev) +{ +	struct net_device *dev = NULL; +	struct gfar_private *priv = NULL; +	int err = 0, i; -	if (dev->features & NETIF_F_IP_CSUM || -	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) -		dev->needed_headroom = GMAC_FCB_LEN; +	err = gfar_of_init(ofdev, &dev); -	/* Program the isrg regs only if number of grps > 1 */ -	if (priv->num_grps > 1) { -		baddr = ®s->isrg0; -		for (i = 0; i < priv->num_grps; i++) { -			isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX); -			isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX); -			gfar_write(baddr, isrg); -			baddr++; -			isrg = 0x0; +	if (err) +		return err; + +	priv = netdev_priv(dev); +	priv->ndev = dev; +	priv->ofdev = ofdev; +	priv->dev = &ofdev->dev; +	SET_NETDEV_DEV(dev, &ofdev->dev); + +	spin_lock_init(&priv->bflock); +	INIT_WORK(&priv->reset_task, gfar_reset_task); + +	platform_set_drvdata(ofdev, priv); + +	gfar_detect_errata(priv); + +	/* Set the dev->base_addr to the gfar reg region */ +	dev->base_addr = (unsigned long) priv->gfargrp[0].regs; + +	/* Fill in the dev structure */ +	dev->watchdog_timeo = TX_TIMEOUT; +	dev->mtu = 1500; +	dev->netdev_ops = &gfar_netdev_ops; +	
dev->ethtool_ops = &gfar_ethtool_ops; + +	/* Register for napi ...We are registering NAPI for each grp */ +	for (i = 0; i < priv->num_grps; i++) { +		if (priv->poll_mode == GFAR_SQ_POLLING) { +			netif_napi_add(dev, &priv->gfargrp[i].napi_rx, +				       gfar_poll_rx_sq, GFAR_DEV_WEIGHT); +			netif_napi_add(dev, &priv->gfargrp[i].napi_tx, +				       gfar_poll_tx_sq, 2); +		} else { +			netif_napi_add(dev, &priv->gfargrp[i].napi_rx, +				       gfar_poll_rx, GFAR_DEV_WEIGHT); +			netif_napi_add(dev, &priv->gfargrp[i].napi_tx, +				       gfar_poll_tx, 2);  		}  	} -	/* Need to reverse the bit maps as  bit_map's MSB is q0 -	 * but, for_each_set_bit parses from right to left, which -	 * basically reverses the queue numbers -	 */ -	for (i = 0; i< priv->num_grps; i++) { -		priv->gfargrp[i].tx_bit_map = -			reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS); -		priv->gfargrp[i].rx_bit_map = -			reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS); +	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { +		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | +				   NETIF_F_RXCSUM; +		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | +				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;  	} -	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, -	 * also assign queues to groups -	 */ -	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) { -		priv->gfargrp[grp_idx].num_rx_queues = 0x0; - -		for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map, -				 priv->num_rx_queues) { -			priv->gfargrp[grp_idx].num_rx_queues++; -			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx]; -			rstat = rstat | (RSTAT_CLEAR_RHALT >> i); -			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i); -		} -		priv->gfargrp[grp_idx].num_tx_queues = 0x0; - -		for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map, -				 priv->num_tx_queues) { -			priv->gfargrp[grp_idx].num_tx_queues++; -			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx]; -			tstat = tstat | (TSTAT_CLEAR_THALT >> i); -			tqueue = tqueue | 
(TQUEUE_EN0 >> i); -		} -		priv->gfargrp[grp_idx].rstat = rstat; -		priv->gfargrp[grp_idx].tstat = tstat; -		rstat = tstat =0; +	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { +		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | +				    NETIF_F_HW_VLAN_CTAG_RX; +		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;  	} -	gfar_write(®s->rqueue, rqueue); -	gfar_write(®s->tqueue, tqueue); +	gfar_init_addr_hash_table(priv); + +	/* Insert receive time stamps into padding alignment bytes */ +	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) +		priv->padding = 8; + +	if (dev->features & NETIF_F_IP_CSUM || +	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) +		dev->needed_headroom = GMAC_FCB_LEN;  	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; @@ -1187,6 +1380,10 @@ static int gfar_probe(struct platform_device *ofdev)  	if (priv->num_tx_queues == 1)  		priv->prio_sched_en = 1; +	set_bit(GFAR_DOWN, &priv->state); + +	gfar_hw_init(priv); +  	/* Carrier starts down, phylib will bring it up */  	netif_carrier_off(dev); @@ -1218,9 +1415,6 @@ static int gfar_probe(struct platform_device *ofdev)  	/* Initialize the filer table */  	gfar_init_filer_table(priv); -	/* Create all the sysfs files */ -	gfar_init_sysfs(dev); -  	/* Print out the device info */  	netdev_info(dev, "mac: %pM\n", dev->dev_addr); @@ -1239,8 +1433,8 @@ static int gfar_probe(struct platform_device *ofdev)  register_fail:  	unmap_group_regs(priv); -	free_tx_pointers(priv); -	free_rx_pointers(priv); +	gfar_free_rx_queues(priv); +	gfar_free_tx_queues(priv);  	if (priv->phy_node)  		of_node_put(priv->phy_node);  	if (priv->tbi_node) @@ -1260,6 +1454,8 @@ static int gfar_remove(struct platform_device *ofdev)  	unregister_netdev(priv->ndev);  	unmap_group_regs(priv); +	gfar_free_rx_queues(priv); +	gfar_free_tx_queues(priv);  	free_gfar_dev(priv);  	return 0; @@ -1285,9 +1481,8 @@ static int gfar_suspend(struct device *dev)  		local_irq_save(flags);  		lock_tx_qs(priv); -		lock_rx_qs(priv); -		
gfar_halt_nodisable(ndev); +		gfar_halt_nodisable(priv);  		/* Disable Tx, and Rx if wake-on-LAN is disabled. */  		tempval = gfar_read(®s->maccfg1); @@ -1299,7 +1494,6 @@ static int gfar_suspend(struct device *dev)  		gfar_write(®s->maccfg1, tempval); -		unlock_rx_qs(priv);  		unlock_tx_qs(priv);  		local_irq_restore(flags); @@ -1345,15 +1539,13 @@ static int gfar_resume(struct device *dev)  	 */  	local_irq_save(flags);  	lock_tx_qs(priv); -	lock_rx_qs(priv);  	tempval = gfar_read(®s->maccfg2);  	tempval &= ~MACCFG2_MPEN;  	gfar_write(®s->maccfg2, tempval); -	gfar_start(ndev); +	gfar_start(priv); -	unlock_rx_qs(priv);  	unlock_tx_qs(priv);  	local_irq_restore(flags); @@ -1380,10 +1572,11 @@ static int gfar_restore(struct device *dev)  		return -ENOMEM;  	} -	init_registers(ndev); -	gfar_set_mac_address(ndev); -	gfar_init_mac(ndev); -	gfar_start(ndev); +	gfar_mac_reset(priv); + +	gfar_init_tx_rx_base(priv); + +	gfar_start(priv);  	priv->oldlink = 0;  	priv->oldspeed = 0; @@ -1478,9 +1671,6 @@ static int init_phy(struct net_device *dev)  	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,  				      interface); -	if (!priv->phydev) -		priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link, -							 interface);  	if (!priv->phydev) {  		dev_err(&dev->dev, "could not attach to PHY\n");  		return -ENODEV; @@ -1541,57 +1731,6 @@ static void gfar_configure_serdes(struct net_device *dev)  		  BMCR_SPEED1000);  } -static void init_registers(struct net_device *dev) -{ -	struct gfar_private *priv = netdev_priv(dev); -	struct gfar __iomem *regs = NULL; -	int i; - -	for (i = 0; i < priv->num_grps; i++) { -		regs = priv->gfargrp[i].regs; -		/* Clear IEVENT */ -		gfar_write(®s->ievent, IEVENT_INIT_CLEAR); - -		/* Initialize IMASK */ -		gfar_write(®s->imask, IMASK_INIT_CLEAR); -	} - -	regs = priv->gfargrp[0].regs; -	/* Init hash registers to zero */ -	gfar_write(®s->igaddr0, 0); -	gfar_write(®s->igaddr1, 0); -	gfar_write(®s->igaddr2, 0); -	
gfar_write(®s->igaddr3, 0); -	gfar_write(®s->igaddr4, 0); -	gfar_write(®s->igaddr5, 0); -	gfar_write(®s->igaddr6, 0); -	gfar_write(®s->igaddr7, 0); - -	gfar_write(®s->gaddr0, 0); -	gfar_write(®s->gaddr1, 0); -	gfar_write(®s->gaddr2, 0); -	gfar_write(®s->gaddr3, 0); -	gfar_write(®s->gaddr4, 0); -	gfar_write(®s->gaddr5, 0); -	gfar_write(®s->gaddr6, 0); -	gfar_write(®s->gaddr7, 0); - -	/* Zero out the rmon mib registers if it has them */ -	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { -		memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib)); - -		/* Mask off the CAM interrupts */ -		gfar_write(®s->rmon.cam1, 0xffffffff); -		gfar_write(®s->rmon.cam2, 0xffffffff); -	} - -	/* Initialize the max receive buffer length */ -	gfar_write(®s->mrblr, priv->rx_buffer_size); - -	/* Initialize the Minimum Frame Length Register */ -	gfar_write(®s->minflr, MINFLR_INIT_SETTINGS); -} -  static int __gfar_is_rx_idle(struct gfar_private *priv)  {  	u32 res; @@ -1599,7 +1738,7 @@ static int __gfar_is_rx_idle(struct gfar_private *priv)  	/* Normaly TSEC should not hang on GRS commands, so we should  	 * actually wait for IEVENT_GRSC flag.  	 */ -	if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002))) +	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))  		return 0;  	/* Read the eTSEC register at offset 0xD1C. 
If bits 7-14 are @@ -1615,23 +1754,13 @@ static int __gfar_is_rx_idle(struct gfar_private *priv)  }  /* Halt the receive and transmit queues */ -static void gfar_halt_nodisable(struct net_device *dev) +static void gfar_halt_nodisable(struct gfar_private *priv)  { -	struct gfar_private *priv = netdev_priv(dev); -	struct gfar __iomem *regs = NULL; +	struct gfar __iomem *regs = priv->gfargrp[0].regs;  	u32 tempval; -	int i; -	for (i = 0; i < priv->num_grps; i++) { -		regs = priv->gfargrp[i].regs; -		/* Mask all interrupts */ -		gfar_write(®s->imask, IMASK_INIT_CLEAR); - -		/* Clear all interrupts */ -		gfar_write(®s->ievent, IEVENT_INIT_CLEAR); -	} +	gfar_ints_disable(priv); -	regs = priv->gfargrp[0].regs;  	/* Stop the DMA, and wait for it to stop */  	tempval = gfar_read(®s->dmactrl);  	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) != @@ -1652,56 +1781,41 @@ static void gfar_halt_nodisable(struct net_device *dev)  }  /* Halt the receive and transmit queues */ -void gfar_halt(struct net_device *dev) +void gfar_halt(struct gfar_private *priv)  { -	struct gfar_private *priv = netdev_priv(dev);  	struct gfar __iomem *regs = priv->gfargrp[0].regs;  	u32 tempval; -	gfar_halt_nodisable(dev); +	/* Dissable the Rx/Tx hw queues */ +	gfar_write(®s->rqueue, 0); +	gfar_write(®s->tqueue, 0); + +	mdelay(10); + +	gfar_halt_nodisable(priv); -	/* Disable Rx and Tx */ +	/* Disable Rx/Tx DMA */  	tempval = gfar_read(®s->maccfg1);  	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);  	gfar_write(®s->maccfg1, tempval);  } -static void free_grp_irqs(struct gfar_priv_grp *grp) -{ -	free_irq(gfar_irq(grp, TX)->irq, grp); -	free_irq(gfar_irq(grp, RX)->irq, grp); -	free_irq(gfar_irq(grp, ER)->irq, grp); -} -  void stop_gfar(struct net_device *dev)  {  	struct gfar_private *priv = netdev_priv(dev); -	unsigned long flags; -	int i; - -	phy_stop(priv->phydev); +	netif_tx_stop_all_queues(dev); -	/* Lock it down */ -	local_irq_save(flags); -	lock_tx_qs(priv); -	lock_rx_qs(priv); +	smp_mb__before_atomic(); 
+	set_bit(GFAR_DOWN, &priv->state); +	smp_mb__after_atomic(); -	gfar_halt(dev); +	disable_napi(priv); -	unlock_rx_qs(priv); -	unlock_tx_qs(priv); -	local_irq_restore(flags); +	/* disable ints and gracefully shut down Rx/Tx DMA */ +	gfar_halt(priv); -	/* Free the IRQs */ -	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { -		for (i = 0; i < priv->num_grps; i++) -			free_grp_irqs(&priv->gfargrp[i]); -	} else { -		for (i = 0; i < priv->num_grps; i++) -			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq, -				 &priv->gfargrp[i]); -	} +	phy_stop(priv->phydev);  	free_skb_resources(priv);  } @@ -1792,17 +1906,15 @@ static void free_skb_resources(struct gfar_private *priv)  			  priv->tx_queue[0]->tx_bd_dma_base);  } -void gfar_start(struct net_device *dev) +void gfar_start(struct gfar_private *priv)  { -	struct gfar_private *priv = netdev_priv(dev);  	struct gfar __iomem *regs = priv->gfargrp[0].regs;  	u32 tempval;  	int i = 0; -	/* Enable Rx and Tx in MACCFG1 */ -	tempval = gfar_read(®s->maccfg1); -	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); -	gfar_write(®s->maccfg1, tempval); +	/* Enable Rx/Tx hw queues */ +	gfar_write(®s->rqueue, priv->rqueue); +	gfar_write(®s->tqueue, priv->tqueue);  	/* Initialize DMACTRL to have WWR and WOP */  	tempval = gfar_read(®s->dmactrl); @@ -1819,52 +1931,23 @@ void gfar_start(struct net_device *dev)  		/* Clear THLT/RHLT, so that the DMA starts polling now */  		gfar_write(®s->tstat, priv->gfargrp[i].tstat);  		gfar_write(®s->rstat, priv->gfargrp[i].rstat); -		/* Unmask the interrupts we look for */ -		gfar_write(®s->imask, IMASK_DEFAULT);  	} -	dev->trans_start = jiffies; /* prevent tx timeout */ -} - -static void gfar_configure_coalescing(struct gfar_private *priv, -			       unsigned long tx_mask, unsigned long rx_mask) -{ -	struct gfar __iomem *regs = priv->gfargrp[0].regs; -	u32 __iomem *baddr; - -	if (priv->mode == MQ_MG_MODE) { -		int i = 0; +	/* Enable Rx/Tx DMA */ +	tempval = gfar_read(®s->maccfg1); +	tempval |= 
(MACCFG1_RX_EN | MACCFG1_TX_EN); +	gfar_write(®s->maccfg1, tempval); -		baddr = ®s->txic0; -		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) { -			gfar_write(baddr + i, 0); -			if (likely(priv->tx_queue[i]->txcoalescing)) -				gfar_write(baddr + i, priv->tx_queue[i]->txic); -		} +	gfar_ints_enable(priv); -		baddr = ®s->rxic0; -		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) { -			gfar_write(baddr + i, 0); -			if (likely(priv->rx_queue[i]->rxcoalescing)) -				gfar_write(baddr + i, priv->rx_queue[i]->rxic); -		} -	} else { -		/* Backward compatible case -- even if we enable -		 * multiple queues, there's only single reg to program -		 */ -		gfar_write(®s->txic, 0); -		if (likely(priv->tx_queue[0]->txcoalescing)) -			gfar_write(®s->txic, priv->tx_queue[0]->txic); - -		gfar_write(®s->rxic, 0); -		if (unlikely(priv->rx_queue[0]->rxcoalescing)) -			gfar_write(®s->rxic, priv->rx_queue[0]->rxic); -	} +	priv->ndev->trans_start = jiffies; /* prevent tx timeout */  } -void gfar_configure_coalescing_all(struct gfar_private *priv) +static void free_grp_irqs(struct gfar_priv_grp *grp)  { -	gfar_configure_coalescing(priv, 0xFF, 0xFF); +	free_irq(gfar_irq(grp, TX)->irq, grp); +	free_irq(gfar_irq(grp, RX)->irq, grp); +	free_irq(gfar_irq(grp, ER)->irq, grp);  }  static int register_grp_irqs(struct gfar_priv_grp *grp) @@ -1923,46 +2006,65 @@ err_irq_fail:  } -/* Bring the controller up and running */ -int startup_gfar(struct net_device *ndev) +static void gfar_free_irq(struct gfar_private *priv)  { -	struct gfar_private *priv = netdev_priv(ndev); -	struct gfar __iomem *regs = NULL; -	int err, i, j; +	int i; -	for (i = 0; i < priv->num_grps; i++) { -		regs= priv->gfargrp[i].regs; -		gfar_write(®s->imask, IMASK_INIT_CLEAR); +	/* Free the IRQs */ +	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { +		for (i = 0; i < priv->num_grps; i++) +			free_grp_irqs(&priv->gfargrp[i]); +	} else { +		for (i = 0; i < priv->num_grps; i++) +			free_irq(gfar_irq(&priv->gfargrp[i], 
TX)->irq, +				 &priv->gfargrp[i]);  	} +} -	regs= priv->gfargrp[0].regs; -	err = gfar_alloc_skb_resources(ndev); -	if (err) -		return err; - -	gfar_init_mac(ndev); +static int gfar_request_irq(struct gfar_private *priv) +{ +	int err, i, j;  	for (i = 0; i < priv->num_grps; i++) {  		err = register_grp_irqs(&priv->gfargrp[i]);  		if (err) {  			for (j = 0; j < i; j++)  				free_grp_irqs(&priv->gfargrp[j]); -			goto irq_fail; +			return err;  		}  	} -	/* Start the controller */ -	gfar_start(ndev); +	return 0; +} + +/* Bring the controller up and running */ +int startup_gfar(struct net_device *ndev) +{ +	struct gfar_private *priv = netdev_priv(ndev); +	int err; + +	gfar_mac_reset(priv); + +	err = gfar_alloc_skb_resources(ndev); +	if (err) +		return err; + +	gfar_init_tx_rx_base(priv); + +	smp_mb__before_atomic(); +	clear_bit(GFAR_DOWN, &priv->state); +	smp_mb__after_atomic(); + +	/* Start Rx/Tx DMA and enable the interrupts */ +	gfar_start(priv);  	phy_start(priv->phydev); -	gfar_configure_coalescing_all(priv); +	enable_napi(priv); -	return 0; +	netif_tx_wake_all_queues(ndev); -irq_fail: -	free_skb_resources(priv); -	return err; +	return 0;  }  /* Called when something needs to use the ethernet device @@ -1973,27 +2075,17 @@ static int gfar_enet_open(struct net_device *dev)  	struct gfar_private *priv = netdev_priv(dev);  	int err; -	enable_napi(priv); - -	/* Initialize a bunch of registers */ -	init_registers(dev); - -	gfar_set_mac_address(dev); -  	err = init_phy(dev); +	if (err) +		return err; -	if (err) { -		disable_napi(priv); +	err = gfar_request_irq(priv); +	if (err)  		return err; -	}  	err = startup_gfar(dev); -	if (err) { -		disable_napi(priv); +	if (err)  		return err; -	} - -	netif_tx_start_all_queues(dev);  	device_set_wakeup_enable(&dev->dev, priv->wol_en); @@ -2119,13 +2211,13 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)  		skb_new = skb_realloc_headroom(skb, fcb_len);  		if (!skb_new) {  			dev->stats.tx_errors++; -			
kfree_skb(skb); +			dev_kfree_skb_any(skb);  			return NETDEV_TX_OK;  		}  		if (skb->sk)  			skb_set_owner_w(skb_new, skb->sk); -		consume_skb(skb); +		dev_consume_skb_any(skb);  		skb = skb_new;  	} @@ -2318,8 +2410,6 @@ static int gfar_close(struct net_device *dev)  {  	struct gfar_private *priv = netdev_priv(dev); -	disable_napi(priv); -  	cancel_work_sync(&priv->reset_task);  	stop_gfar(dev); @@ -2327,7 +2417,7 @@ static int gfar_close(struct net_device *dev)  	phy_disconnect(priv->phydev);  	priv->phydev = NULL; -	netif_tx_stop_all_queues(dev); +	gfar_free_irq(priv);  	return 0;  } @@ -2340,77 +2430,9 @@ static int gfar_set_mac_address(struct net_device *dev)  	return 0;  } -/* Check if rx parser should be activated */ -void gfar_check_rx_parser_mode(struct gfar_private *priv) -{ -	struct gfar __iomem *regs; -	u32 tempval; - -	regs = priv->gfargrp[0].regs; - -	tempval = gfar_read(®s->rctrl); -	/* If parse is no longer required, then disable parser */ -	if (tempval & RCTRL_REQ_PARSER) { -		tempval |= RCTRL_PRSDEP_INIT; -		priv->uses_rxfcb = 1; -	} else { -		tempval &= ~RCTRL_PRSDEP_INIT; -		priv->uses_rxfcb = 0; -	} -	gfar_write(®s->rctrl, tempval); -} - -/* Enables and disables VLAN insertion/extraction */ -void gfar_vlan_mode(struct net_device *dev, netdev_features_t features) -{ -	struct gfar_private *priv = netdev_priv(dev); -	struct gfar __iomem *regs = NULL; -	unsigned long flags; -	u32 tempval; - -	regs = priv->gfargrp[0].regs; -	local_irq_save(flags); -	lock_rx_qs(priv); - -	if (features & NETIF_F_HW_VLAN_CTAG_TX) { -		/* Enable VLAN tag insertion */ -		tempval = gfar_read(®s->tctrl); -		tempval |= TCTRL_VLINS; -		gfar_write(®s->tctrl, tempval); -	} else { -		/* Disable VLAN tag insertion */ -		tempval = gfar_read(®s->tctrl); -		tempval &= ~TCTRL_VLINS; -		gfar_write(®s->tctrl, tempval); -	} - -	if (features & NETIF_F_HW_VLAN_CTAG_RX) { -		/* Enable VLAN tag extraction */ -		tempval = gfar_read(®s->rctrl); -		tempval |= (RCTRL_VLEX | 
RCTRL_PRSDEP_INIT); -		gfar_write(®s->rctrl, tempval); -		priv->uses_rxfcb = 1; -	} else { -		/* Disable VLAN tag extraction */ -		tempval = gfar_read(®s->rctrl); -		tempval &= ~RCTRL_VLEX; -		gfar_write(®s->rctrl, tempval); - -		gfar_check_rx_parser_mode(priv); -	} - -	gfar_change_mtu(dev, dev->mtu); - -	unlock_rx_qs(priv); -	local_irq_restore(flags); -} -  static int gfar_change_mtu(struct net_device *dev, int new_mtu)  { -	int tempsize, tempval;  	struct gfar_private *priv = netdev_priv(dev); -	struct gfar __iomem *regs = priv->gfargrp[0].regs; -	int oldsize = priv->rx_buffer_size;  	int frame_size = new_mtu + ETH_HLEN;  	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { @@ -2418,45 +2440,33 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)  		return -EINVAL;  	} -	if (priv->uses_rxfcb) -		frame_size += GMAC_FCB_LEN; - -	frame_size += priv->padding; - -	tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + -		   INCREMENTAL_BUFFER_SIZE; +	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) +		cpu_relax(); -	/* Only stop and start the controller if it isn't already -	 * stopped, and we changed something -	 */ -	if ((oldsize != tempsize) && (dev->flags & IFF_UP)) +	if (dev->flags & IFF_UP)  		stop_gfar(dev); -	priv->rx_buffer_size = tempsize; -  	dev->mtu = new_mtu; -	gfar_write(®s->mrblr, priv->rx_buffer_size); -	gfar_write(®s->maxfrm, priv->rx_buffer_size); +	if (dev->flags & IFF_UP) +		startup_gfar(dev); -	/* If the mtu is larger than the max size for standard -	 * ethernet frames (ie, a jumbo frame), then set maccfg2 -	 * to allow huge frames, and to check the length -	 */ -	tempval = gfar_read(®s->maccfg2); +	clear_bit_unlock(GFAR_RESETTING, &priv->state); -	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE || -	    gfar_has_errata(priv, GFAR_ERRATA_74)) -		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); -	else -		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); +	return 0; +} -	gfar_write(®s->maccfg2, 
tempval); +void reset_gfar(struct net_device *ndev) +{ +	struct gfar_private *priv = netdev_priv(ndev); -	if ((oldsize != tempsize) && (dev->flags & IFF_UP)) -		startup_gfar(dev); +	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) +		cpu_relax(); -	return 0; +	stop_gfar(ndev); +	startup_gfar(ndev); + +	clear_bit_unlock(GFAR_RESETTING, &priv->state);  }  /* gfar_reset_task gets scheduled when a packet has not been @@ -2468,16 +2478,7 @@ static void gfar_reset_task(struct work_struct *work)  {  	struct gfar_private *priv = container_of(work, struct gfar_private,  						 reset_task); -	struct net_device *dev = priv->ndev; - -	if (dev->flags & IFF_UP) { -		netif_tx_stop_all_queues(dev); -		stop_gfar(dev); -		startup_gfar(dev); -		netif_tx_start_all_queues(dev); -	} - -	netif_tx_schedule_all(dev); +	reset_gfar(priv->ndev);  }  static void gfar_timeout(struct net_device *dev) @@ -2590,8 +2591,10 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)  	}  	/* If we freed a buffer, we can restart transmission, if necessary */ -	if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree) -		netif_wake_subqueue(dev, tqi); +	if (tx_queue->num_txbdfree && +	    netif_tx_queue_stopped(txq) && +	    !(test_bit(GFAR_DOWN, &priv->state))) +		netif_wake_subqueue(priv->ndev, tqi);  	/* Update dirty indicators */  	tx_queue->skb_dirtytx = skb_dirtytx; @@ -2600,31 +2603,6 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)  	netdev_tx_completed_queue(txq, howmany, bytes_sent);  } -static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp) -{ -	unsigned long flags; - -	spin_lock_irqsave(&gfargrp->grplock, flags); -	if (napi_schedule_prep(&gfargrp->napi)) { -		gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED); -		__napi_schedule(&gfargrp->napi); -	} else { -		/* Clear IEVENT, so interrupts aren't called again -		 * because of the packets that have already arrived. 
-		 */ -		gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK); -	} -	spin_unlock_irqrestore(&gfargrp->grplock, flags); - -} - -/* Interrupt Handler for Transmit complete */ -static irqreturn_t gfar_transmit(int irq, void *grp_id) -{ -	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); -	return IRQ_HANDLED; -} -  static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,  			   struct sk_buff *skb)  { @@ -2695,7 +2673,48 @@ static inline void count_errors(unsigned short status, struct net_device *dev)  irqreturn_t gfar_receive(int irq, void *grp_id)  { -	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); +	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; +	unsigned long flags; +	u32 imask; + +	if (likely(napi_schedule_prep(&grp->napi_rx))) { +		spin_lock_irqsave(&grp->grplock, flags); +		imask = gfar_read(&grp->regs->imask); +		imask &= IMASK_RX_DISABLED; +		gfar_write(&grp->regs->imask, imask); +		spin_unlock_irqrestore(&grp->grplock, flags); +		__napi_schedule(&grp->napi_rx); +	} else { +		/* Clear IEVENT, so interrupts aren't called again +		 * because of the packets that have already arrived. +		 */ +		gfar_write(&grp->regs->ievent, IEVENT_RX_MASK); +	} + +	return IRQ_HANDLED; +} + +/* Interrupt Handler for Transmit complete */ +static irqreturn_t gfar_transmit(int irq, void *grp_id) +{ +	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; +	unsigned long flags; +	u32 imask; + +	if (likely(napi_schedule_prep(&grp->napi_tx))) { +		spin_lock_irqsave(&grp->grplock, flags); +		imask = gfar_read(&grp->regs->imask); +		imask &= IMASK_TX_DISABLED; +		gfar_write(&grp->regs->imask, imask); +		spin_unlock_irqrestore(&grp->grplock, flags); +		__napi_schedule(&grp->napi_tx); +	} else { +		/* Clear IEVENT, so interrupts aren't called again +		 * because of the packets that have already arrived. 
+		 */ +		gfar_write(&grp->regs->ievent, IEVENT_TX_MASK); +	} +  	return IRQ_HANDLED;  } @@ -2819,7 +2838,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)  				rx_queue->stats.rx_bytes += pkt_len;  				skb_record_rx_queue(skb, rx_queue->qindex);  				gfar_process_frame(dev, skb, amount_pull, -						   &rx_queue->grp->napi); +						   &rx_queue->grp->napi_rx);  			} else {  				netif_warn(priv, rx_err, dev, "Missing skb!\n"); @@ -2848,66 +2867,81 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)  	return howmany;  } -static int gfar_poll_sq(struct napi_struct *napi, int budget) +static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)  {  	struct gfar_priv_grp *gfargrp = -		container_of(napi, struct gfar_priv_grp, napi); +		container_of(napi, struct gfar_priv_grp, napi_rx);  	struct gfar __iomem *regs = gfargrp->regs; -	struct gfar_priv_tx_q *tx_queue = gfargrp->priv->tx_queue[0]; -	struct gfar_priv_rx_q *rx_queue = gfargrp->priv->rx_queue[0]; +	struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;  	int work_done = 0;  	/* Clear IEVENT, so interrupts aren't called again  	 * because of the packets that have already arrived  	 */ -	gfar_write(®s->ievent, IEVENT_RTX_MASK); - -	/* run Tx cleanup to completion */ -	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) -		gfar_clean_tx_ring(tx_queue); +	gfar_write(®s->ievent, IEVENT_RX_MASK);  	work_done = gfar_clean_rx_ring(rx_queue, budget);  	if (work_done < budget) { +		u32 imask;  		napi_complete(napi);  		/* Clear the halt bit in RSTAT */  		gfar_write(®s->rstat, gfargrp->rstat); -		gfar_write(®s->imask, IMASK_DEFAULT); - -		/* If we are coalescing interrupts, update the timer -		 * Otherwise, clear it -		 */ -		gfar_write(®s->txic, 0); -		if (likely(tx_queue->txcoalescing)) -			gfar_write(®s->txic, tx_queue->txic); - -		gfar_write(®s->rxic, 0); -		if (unlikely(rx_queue->rxcoalescing)) -			gfar_write(®s->rxic, rx_queue->rxic); +		
spin_lock_irq(&gfargrp->grplock); +		imask = gfar_read(®s->imask); +		imask |= IMASK_RX_DEFAULT; +		gfar_write(®s->imask, imask); +		spin_unlock_irq(&gfargrp->grplock);  	}  	return work_done;  } -static int gfar_poll(struct napi_struct *napi, int budget) +static int gfar_poll_tx_sq(struct napi_struct *napi, int budget) +{ +	struct gfar_priv_grp *gfargrp = +		container_of(napi, struct gfar_priv_grp, napi_tx); +	struct gfar __iomem *regs = gfargrp->regs; +	struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue; +	u32 imask; + +	/* Clear IEVENT, so interrupts aren't called again +	 * because of the packets that have already arrived +	 */ +	gfar_write(®s->ievent, IEVENT_TX_MASK); + +	/* run Tx cleanup to completion */ +	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) +		gfar_clean_tx_ring(tx_queue); + +	napi_complete(napi); + +	spin_lock_irq(&gfargrp->grplock); +	imask = gfar_read(®s->imask); +	imask |= IMASK_TX_DEFAULT; +	gfar_write(®s->imask, imask); +	spin_unlock_irq(&gfargrp->grplock); + +	return 0; +} + +static int gfar_poll_rx(struct napi_struct *napi, int budget)  {  	struct gfar_priv_grp *gfargrp = -		container_of(napi, struct gfar_priv_grp, napi); +		container_of(napi, struct gfar_priv_grp, napi_rx);  	struct gfar_private *priv = gfargrp->priv;  	struct gfar __iomem *regs = gfargrp->regs; -	struct gfar_priv_tx_q *tx_queue = NULL;  	struct gfar_priv_rx_q *rx_queue = NULL;  	int work_done = 0, work_done_per_q = 0;  	int i, budget_per_q = 0; -	int has_tx_work;  	unsigned long rstat_rxf;  	int num_act_queues;  	/* Clear IEVENT, so interrupts aren't called again  	 * because of the packets that have already arrived  	 */ -	gfar_write(®s->ievent, IEVENT_RTX_MASK); +	gfar_write(®s->ievent, IEVENT_RX_MASK);  	rstat_rxf = gfar_read(®s->rstat) & RSTAT_RXF_MASK; @@ -2915,67 +2949,84 @@ static int gfar_poll(struct napi_struct *napi, int budget)  	if (num_act_queues)  		budget_per_q = budget/num_act_queues; -	while (1) { -		has_tx_work = 0; -		for_each_set_bit(i, 
&gfargrp->tx_bit_map, priv->num_tx_queues) { -			tx_queue = priv->tx_queue[i]; -			/* run Tx cleanup to completion */ -			if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) { -				gfar_clean_tx_ring(tx_queue); -				has_tx_work = 1; -			} -		} +	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { +		/* skip queue if not active */ +		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i))) +			continue; -		for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { -			/* skip queue if not active */ -			if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i))) -				continue; - -			rx_queue = priv->rx_queue[i]; -			work_done_per_q = -				gfar_clean_rx_ring(rx_queue, budget_per_q); -			work_done += work_done_per_q; - -			/* finished processing this queue */ -			if (work_done_per_q < budget_per_q) { -				/* clear active queue hw indication */ -				gfar_write(®s->rstat, -					   RSTAT_CLEAR_RXF0 >> i); -				rstat_rxf &= ~(RSTAT_CLEAR_RXF0 >> i); -				num_act_queues--; - -				if (!num_act_queues) -					break; -				/* recompute budget per Rx queue */ -				budget_per_q = -					(budget - work_done) / num_act_queues; -			} +		rx_queue = priv->rx_queue[i]; +		work_done_per_q = +			gfar_clean_rx_ring(rx_queue, budget_per_q); +		work_done += work_done_per_q; + +		/* finished processing this queue */ +		if (work_done_per_q < budget_per_q) { +			/* clear active queue hw indication */ +			gfar_write(®s->rstat, +				   RSTAT_CLEAR_RXF0 >> i); +			num_act_queues--; + +			if (!num_act_queues) +				break;  		} +	} -		if (work_done >= budget) -			break; +	if (!num_act_queues) { +		u32 imask; +		napi_complete(napi); -		if (!num_act_queues && !has_tx_work) { +		/* Clear the halt bit in RSTAT */ +		gfar_write(®s->rstat, gfargrp->rstat); -			napi_complete(napi); +		spin_lock_irq(&gfargrp->grplock); +		imask = gfar_read(®s->imask); +		imask |= IMASK_RX_DEFAULT; +		gfar_write(®s->imask, imask); +		spin_unlock_irq(&gfargrp->grplock); +	} -			/* Clear the halt bit in RSTAT */ -			gfar_write(®s->rstat, 
gfargrp->rstat); +	return work_done; +} -			gfar_write(®s->imask, IMASK_DEFAULT); +static int gfar_poll_tx(struct napi_struct *napi, int budget) +{ +	struct gfar_priv_grp *gfargrp = +		container_of(napi, struct gfar_priv_grp, napi_tx); +	struct gfar_private *priv = gfargrp->priv; +	struct gfar __iomem *regs = gfargrp->regs; +	struct gfar_priv_tx_q *tx_queue = NULL; +	int has_tx_work = 0; +	int i; -			/* If we are coalescing interrupts, update the timer -			 * Otherwise, clear it -			 */ -			gfar_configure_coalescing(priv, gfargrp->rx_bit_map, -						  gfargrp->tx_bit_map); -			break; +	/* Clear IEVENT, so interrupts aren't called again +	 * because of the packets that have already arrived +	 */ +	gfar_write(®s->ievent, IEVENT_TX_MASK); + +	for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) { +		tx_queue = priv->tx_queue[i]; +		/* run Tx cleanup to completion */ +		if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) { +			gfar_clean_tx_ring(tx_queue); +			has_tx_work = 1;  		}  	} -	return work_done; +	if (!has_tx_work) { +		u32 imask; +		napi_complete(napi); + +		spin_lock_irq(&gfargrp->grplock); +		imask = gfar_read(®s->imask); +		imask |= IMASK_TX_DEFAULT; +		gfar_write(®s->imask, imask); +		spin_unlock_irq(&gfargrp->grplock); +	} + +	return 0;  } +  #ifdef CONFIG_NET_POLL_CONTROLLER  /* Polling 'interrupt' - used by things like netconsole to send skbs   * without having to re-enable interrupts. 
It's not called while @@ -3034,41 +3085,6 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id)  	return IRQ_HANDLED;  } -static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) -{ -	struct phy_device *phydev = priv->phydev; -	u32 val = 0; - -	if (!phydev->duplex) -		return val; - -	if (!priv->pause_aneg_en) { -		if (priv->tx_pause_en) -			val |= MACCFG1_TX_FLOW; -		if (priv->rx_pause_en) -			val |= MACCFG1_RX_FLOW; -	} else { -		u16 lcl_adv, rmt_adv; -		u8 flowctrl; -		/* get link partner capabilities */ -		rmt_adv = 0; -		if (phydev->pause) -			rmt_adv = LPA_PAUSE_CAP; -		if (phydev->asym_pause) -			rmt_adv |= LPA_PAUSE_ASYM; - -		lcl_adv = mii_advertise_flowctrl(phydev->advertising); - -		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); -		if (flowctrl & FLOW_CTRL_TX) -			val |= MACCFG1_TX_FLOW; -		if (flowctrl & FLOW_CTRL_RX) -			val |= MACCFG1_RX_FLOW; -	} - -	return val; -} -  /* Called every time the controller might need to be made   * aware of new link state.  The PHY code conveys this   * information through variables in the phydev structure, and this @@ -3078,86 +3094,12 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)  static void adjust_link(struct net_device *dev)  {  	struct gfar_private *priv = netdev_priv(dev); -	struct gfar __iomem *regs = priv->gfargrp[0].regs; -	unsigned long flags;  	struct phy_device *phydev = priv->phydev; -	int new_state = 0; - -	local_irq_save(flags); -	lock_tx_qs(priv); - -	if (phydev->link) { -		u32 tempval1 = gfar_read(®s->maccfg1); -		u32 tempval = gfar_read(®s->maccfg2); -		u32 ecntrl = gfar_read(®s->ecntrl); - -		/* Now we make sure that we can be in full duplex mode. -		 * If not, we operate in half-duplex mode. 
-		 */ -		if (phydev->duplex != priv->oldduplex) { -			new_state = 1; -			if (!(phydev->duplex)) -				tempval &= ~(MACCFG2_FULL_DUPLEX); -			else -				tempval |= MACCFG2_FULL_DUPLEX; - -			priv->oldduplex = phydev->duplex; -		} - -		if (phydev->speed != priv->oldspeed) { -			new_state = 1; -			switch (phydev->speed) { -			case 1000: -				tempval = -				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); - -				ecntrl &= ~(ECNTRL_R100); -				break; -			case 100: -			case 10: -				tempval = -				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); - -				/* Reduced mode distinguishes -				 * between 10 and 100 -				 */ -				if (phydev->speed == SPEED_100) -					ecntrl |= ECNTRL_R100; -				else -					ecntrl &= ~(ECNTRL_R100); -				break; -			default: -				netif_warn(priv, link, dev, -					   "Ack!  Speed (%d) is not 10/100/1000!\n", -					   phydev->speed); -				break; -			} - -			priv->oldspeed = phydev->speed; -		} - -		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); -		tempval1 |= gfar_get_flowctrl_cfg(priv); - -		gfar_write(®s->maccfg1, tempval1); -		gfar_write(®s->maccfg2, tempval); -		gfar_write(®s->ecntrl, ecntrl); - -		if (!priv->oldlink) { -			new_state = 1; -			priv->oldlink = 1; -		} -	} else if (priv->oldlink) { -		new_state = 1; -		priv->oldlink = 0; -		priv->oldspeed = 0; -		priv->oldduplex = -1; -	} -	if (new_state && netif_msg_link(priv)) -		phy_print_status(phydev); -	unlock_tx_qs(priv); -	local_irq_restore(flags); +	if (unlikely(phydev->link != priv->oldlink || +		     phydev->duplex != priv->oldduplex || +		     phydev->speed != priv->oldspeed)) +		gfar_update_link_state(priv);  }  /* Update the hash table based on the current list of multicast @@ -3403,6 +3345,114 @@ static irqreturn_t gfar_error(int irq, void *grp_id)  	return IRQ_HANDLED;  } +static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) +{ +	struct phy_device *phydev = priv->phydev; +	u32 val = 0; + +	if (!phydev->duplex) +		return val; + +	if (!priv->pause_aneg_en) { +		if 
(priv->tx_pause_en) +			val |= MACCFG1_TX_FLOW; +		if (priv->rx_pause_en) +			val |= MACCFG1_RX_FLOW; +	} else { +		u16 lcl_adv, rmt_adv; +		u8 flowctrl; +		/* get link partner capabilities */ +		rmt_adv = 0; +		if (phydev->pause) +			rmt_adv = LPA_PAUSE_CAP; +		if (phydev->asym_pause) +			rmt_adv |= LPA_PAUSE_ASYM; + +		lcl_adv = mii_advertise_flowctrl(phydev->advertising); + +		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); +		if (flowctrl & FLOW_CTRL_TX) +			val |= MACCFG1_TX_FLOW; +		if (flowctrl & FLOW_CTRL_RX) +			val |= MACCFG1_RX_FLOW; +	} + +	return val; +} + +static noinline void gfar_update_link_state(struct gfar_private *priv) +{ +	struct gfar __iomem *regs = priv->gfargrp[0].regs; +	struct phy_device *phydev = priv->phydev; + +	if (unlikely(test_bit(GFAR_RESETTING, &priv->state))) +		return; + +	if (phydev->link) { +		u32 tempval1 = gfar_read(&regs->maccfg1); +		u32 tempval = gfar_read(&regs->maccfg2); +		u32 ecntrl = gfar_read(&regs->ecntrl); + +		if (phydev->duplex != priv->oldduplex) { +			if (!(phydev->duplex)) +				tempval &= ~(MACCFG2_FULL_DUPLEX); +			else +				tempval |= MACCFG2_FULL_DUPLEX; + +			priv->oldduplex = phydev->duplex; +		} + +		if (phydev->speed != priv->oldspeed) { +			switch (phydev->speed) { +			case 1000: +				tempval = +				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); + +				ecntrl &= ~(ECNTRL_R100); +				break; +			case 100: +			case 10: +				tempval = +				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); + +				/* Reduced mode distinguishes +				 * between 10 and 100 +				 */ +				if (phydev->speed == SPEED_100) +					ecntrl |= ECNTRL_R100; +				else +					ecntrl &= ~(ECNTRL_R100); +				break; +			default: +				netif_warn(priv, link, priv->ndev, +					   "Ack!  
Speed (%d) is not 10/100/1000!\n", +					   phydev->speed); +				break; +			} + +			priv->oldspeed = phydev->speed; +		} + +		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); +		tempval1 |= gfar_get_flowctrl_cfg(priv); + +		gfar_write(&regs->maccfg1, tempval1); +		gfar_write(&regs->maccfg2, tempval); +		gfar_write(&regs->ecntrl, ecntrl); + +		if (!priv->oldlink) +			priv->oldlink = 1; + +	} else if (priv->oldlink) { +		priv->oldlink = 0; +		priv->oldspeed = 0; +		priv->oldduplex = -1; +	} + +	if (netif_msg_link(priv)) +		phy_print_status(phydev); +} +  static struct of_device_id gfar_match[] =  {  	{  | 
