Diffstat (limited to 'drivers/net/gianfar.c')
 drivers/net/gianfar.c | 1826 ++++++++++++++++++++++++++++++++----------------
 1 file changed, 1254 insertions(+), 572 deletions(-)
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 5bf31f1509c..16def131c39 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -8,9 +8,10 @@
*
* Author: Andy Fleming
* Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
*
- * Copyright (c) 2002-2006 Freescale Semiconductor, Inc.
- * Copyright (c) 2007 MontaVista Software, Inc.
+ * Copyright 2002-2009 Freescale Semiconductor, Inc.
+ * Copyright 2007 MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -109,7 +110,7 @@ static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
-static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
+static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
@@ -130,8 +131,8 @@ static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
-int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
-static int gfar_clean_tx_ring(struct net_device *dev);
+int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
+static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
int amount_pull);
static void gfar_vlan_rx_register(struct net_device *netdev,
@@ -142,11 +143,277 @@ void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb);
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");
+static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
+ dma_addr_t buf)
+{
+ u32 lstatus;
+
+ bdp->bufPtr = buf;
+
+ lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
+ if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
+ lstatus |= BD_LFLAG(RXBD_WRAP);
+
+ eieio();
+
+ bdp->lstatus = lstatus;
+}
+
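+/* (Re)initialize all TX and RX descriptor rings: reset TX ring
+ * bookkeeping, clear the TX descriptors, and make sure every RX
+ * descriptor owns an skb, allocating fresh ones where needed. */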
+static int gfar_init_bds(struct net_device *ndev)
+{
+ struct gfar_private *priv = netdev_priv(ndev);
+ struct gfar_priv_tx_q *tx_queue = NULL;
+ struct gfar_priv_rx_q *rx_queue = NULL;
+ struct txbd8 *txbdp;
+ struct rxbd8 *rxbdp;
+ int i, j;
+
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ tx_queue = priv->tx_queue[i];
+ /* Initialize some variables in our dev structure */
+ tx_queue->num_txbdfree = tx_queue->tx_ring_size;
+ tx_queue->dirty_tx = tx_queue->tx_bd_base;
+ tx_queue->cur_tx = tx_queue->tx_bd_base;
+ tx_queue->skb_curtx = 0;
+ tx_queue->skb_dirtytx = 0;
+
+ /* Initialize Transmit Descriptor Ring */
+ txbdp = tx_queue->tx_bd_base;
+ for (j = 0; j < tx_queue->tx_ring_size; j++) {
+ txbdp->lstatus = 0;
+ txbdp->bufPtr = 0;
+ txbdp++;
+ }
+
+ /* Set the last descriptor in the ring to indicate wrap */
+ txbdp--;
+ txbdp->status |= TXBD_WRAP;
+ }
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ rx_queue = priv->rx_queue[i];
+ rx_queue->cur_rx = rx_queue->rx_bd_base;
+ rx_queue->skb_currx = 0;
+ rxbdp = rx_queue->rx_bd_base;
+
+ for (j = 0; j < rx_queue->rx_ring_size; j++) {
+ struct sk_buff *skb = rx_queue->rx_skbuff[j];
+
+ if (skb) {
+ gfar_init_rxbdp(rx_queue, rxbdp,
+ rxbdp->bufPtr);
+ } else {
+ skb = gfar_new_skb(ndev);
+ if (!skb) {
+ pr_err("%s: Can't allocate RX buffers\n",
+ ndev->name);
+ goto err_rxalloc_fail;
+ }
+ rx_queue->rx_skbuff[j] = skb;
+
+ gfar_new_rxbdp(rx_queue, rxbdp, skb);
+ }
+
+ rxbdp++;
+ }
+ }
+
+ return 0;
+
+err_rxalloc_fail:
+ free_skb_resources(priv);
+ return -ENOMEM;
+}
+
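+/* Allocate one coherent DMA region holding the descriptor rings of
+ * all TX and RX queues back to back, carve it up per queue, then
+ * allocate the per-queue skb pointer arrays and init the rings. */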
+static int gfar_alloc_skb_resources(struct net_device *ndev)
+{
+ void *vaddr;
+ dma_addr_t addr;
+ int i, j, k;
+ struct gfar_private *priv = netdev_priv(ndev);
+ struct device *dev = &priv->ofdev->dev;
+ struct gfar_priv_tx_q *tx_queue = NULL;
+ struct gfar_priv_rx_q *rx_queue = NULL;
+
+ priv->total_tx_ring_size = 0;
+ for (i = 0; i < priv->num_tx_queues; i++)
+ priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
+
+ priv->total_rx_ring_size = 0;
+ for (i = 0; i < priv->num_rx_queues; i++)
+ priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
+
+ /* Allocate memory for the buffer descriptors */
+ vaddr = dma_alloc_coherent(dev,
+ sizeof(struct txbd8) * priv->total_tx_ring_size +
+ sizeof(struct rxbd8) * priv->total_rx_ring_size,
+ &addr, GFP_KERNEL);
+ if (!vaddr) {
+ if (netif_msg_ifup(priv))
+ pr_err("%s: Could not allocate buffer descriptors!\n",
+ ndev->name);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ tx_queue = priv->tx_queue[i];
+ tx_queue->tx_bd_base = (struct txbd8 *) vaddr;
+ tx_queue->tx_bd_dma_base = addr;
+ tx_queue->dev = ndev;
+ /* enet DMA only understands physical addresses */
+ addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
+ vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
+ }
+
+ /* Start the rx descriptor ring where the tx ring leaves off */
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ rx_queue = priv->rx_queue[i];
+ rx_queue->rx_bd_base = (struct rxbd8 *) vaddr;
+ rx_queue->rx_bd_dma_base = addr;
+ rx_queue->dev = ndev;
+ addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
+ vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
+ }
+
+ /* Setup the skbuff rings */
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ tx_queue = priv->tx_queue[i];
+ tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
+ tx_queue->tx_ring_size, GFP_KERNEL);
+ if (!tx_queue->tx_skbuff) {
+ if (netif_msg_ifup(priv))
+ pr_err("%s: Could not allocate tx_skbuff\n",
+ ndev->name);
+ goto cleanup;
+ }
+
+ for (k = 0; k < tx_queue->tx_ring_size; k++)
+ tx_queue->tx_skbuff[k] = NULL;
+ }
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ rx_queue = priv->rx_queue[i];
+ rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
+ rx_queue->rx_ring_size, GFP_KERNEL);
+
+ if (!rx_queue->rx_skbuff) {
+ if (netif_msg_ifup(priv))
+ pr_err("%s: Could not allocate rx_skbuff\n",
+ ndev->name);
+ goto cleanup;
+ }
+
+ for (j = 0; j < rx_queue->rx_ring_size; j++)
+ rx_queue->rx_skbuff[j] = NULL;
+ }
+
+ if (gfar_init_bds(ndev))
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ free_skb_resources(priv);
+ return -ENOMEM;
+}
+
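+/* Program the per-queue descriptor ring base registers; each queue's
+ * base register sits 8 bytes after the previous one, hence the
+ * baddr += 2 stride over a u32 pointer. */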
+static void gfar_init_tx_rx_base(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 __iomem *baddr;
+ int i;
+
+ baddr = &regs->tbase0;
+ for(i = 0; i < priv->num_tx_queues; i++) {
+ gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
+ baddr += 2;
+ }
+
+ baddr = &regs->rbase0;
+ for(i = 0; i < priv->num_rx_queues; i++) {
+ gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
+ baddr += 2;
+ }
+}
+
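+/* MAC-level init: program ring bases and coalescing, build RCTRL and
+ * TCTRL from the current settings (filer, checksumming, extended
+ * hash, padding, VLAN), then set up stashing attributes and the TX
+ * FIFO thresholds. */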
+static void gfar_init_mac(struct net_device *ndev)
+{
+ struct gfar_private *priv = netdev_priv(ndev);
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 rctrl = 0;
+ u32 tctrl = 0;
+ u32 attrs = 0;
+
+ /* write the tx/rx base registers */
+ gfar_init_tx_rx_base(priv);
+
+ /* Configure the coalescing support */
+ gfar_configure_coalescing(priv, 0xFF, 0xFF);
+
+ if (priv->rx_filer_enable)
+ rctrl |= RCTRL_FILREN;
+
+ if (priv->rx_csum_enable)
+ rctrl |= RCTRL_CHECKSUMMING;
+
+ if (priv->extended_hash) {
+ rctrl |= RCTRL_EXTHASH;
+
+ gfar_clear_exact_match(ndev);
+ rctrl |= RCTRL_EMEN;
+ }
+
+ if (priv->padding) {
+ rctrl &= ~RCTRL_PAL_MASK;
+ rctrl |= RCTRL_PADDING(priv->padding);
+ }
+
+ /* keep vlan-related bits if VLAN is enabled */
+ if (priv->vlgrp) {
+ rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
+ tctrl |= TCTRL_VLINS;
+ }
+
+ /* Init rctrl based on our settings */
+ gfar_write(&regs->rctrl, rctrl);
+
+ if (ndev->features & NETIF_F_IP_CSUM)
+ tctrl |= TCTRL_INIT_CSUM;
+
+ tctrl |= TCTRL_TXSCHED_PRIO;
+
+ gfar_write(&regs->tctrl, tctrl);
+
+ /* Set the extraction length and index */
+ attrs = ATTRELI_EL(priv->rx_stash_size) |
+ ATTRELI_EI(priv->rx_stash_index);
+
+ gfar_write(&regs->attreli, attrs);
+
+ /* Start with defaults, and add stashing or locking
+ * depending on the appropriate variables */
+ attrs = ATTR_INIT_SETTINGS;
+
+ if (priv->bd_stash_en)
+ attrs |= ATTR_BDSTASH;
+
+ if (priv->rx_stash_size != 0)
+ attrs |= ATTR_BUFSTASH;
+
+ gfar_write(&regs->attr, attrs);
+
+ gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
+ gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
+ gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
+}
+
static const struct net_device_ops gfar_netdev_ops = {
.ndo_open = gfar_enet_open,
.ndo_start_xmit = gfar_start_xmit,
@@ -155,6 +422,7 @@ static const struct net_device_ops gfar_netdev_ops = {
.ndo_set_multicast_list = gfar_set_multi,
.ndo_tx_timeout = gfar_timeout,
.ndo_do_ioctl = gfar_ioctl,
+ .ndo_select_queue = gfar_select_queue,
.ndo_vlan_rx_register = gfar_vlan_rx_register,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -163,56 +431,252 @@ static const struct net_device_ops gfar_netdev_ops = {
#endif
};
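+
+/* Software shadow of the RX filer table, kept in step with every
+ * gfar_write_filer() call below. */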
+unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
+unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
+
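+/* Take/release the locks of all RX or TX queues; callers are expected
+ * to disable interrupts first (see gfar_suspend/gfar_resume). */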
+void lock_rx_qs(struct gfar_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_rx_queues; i++)
+ spin_lock(&priv->rx_queue[i]->rxlock);
+}
+
+void lock_tx_qs(struct gfar_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_tx_queues; i++)
+ spin_lock(&priv->tx_queue[i]->txlock);
+}
+
+void unlock_rx_qs(struct gfar_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_rx_queues; i++)
+ spin_unlock(&priv->rx_queue[i]->rxlock);
+}
+
+void unlock_tx_qs(struct gfar_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_tx_queues; i++)
+ spin_unlock(&priv->tx_queue[i]->txlock);
+}
+
/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
return priv->vlgrp || priv->rx_csum_enable;
}
-static int gfar_of_init(struct net_device *dev)
+u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+ return skb_get_queue_mapping(skb);
+}
+static void free_tx_pointers(struct gfar_private *priv)
+{
+ int i = 0;
+
+ for (i = 0; i < priv->num_tx_queues; i++)
+ kfree(priv->tx_queue[i]);
+}
+
+static void free_rx_pointers(struct gfar_private *priv)
+{
+ int i = 0;
+
+ for (i = 0; i < priv->num_rx_queues; i++)
+ kfree(priv->rx_queue[i]);
+}
+
+static void unmap_group_regs(struct gfar_private *priv)
+{
+ int i = 0;
+
+ for (i = 0; i < MAXGROUPS; i++)
+ if (priv->gfargrp[i].regs)
+ iounmap(priv->gfargrp[i].regs);
+}
+
+static void disable_napi(struct gfar_private *priv)
+{
+ int i = 0;
+
+ for (i = 0; i < priv->num_grps; i++)
+ napi_disable(&priv->gfargrp[i].napi);
+}
+
+static void enable_napi(struct gfar_private *priv)
+{
+ int i = 0;
+
+ for (i = 0; i < priv->num_grps; i++)
+ napi_enable(&priv->gfargrp[i].napi);
+}
+
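+/* Map one register group's MMIO region, grab its interrupt(s), and
+ * read the fsl,rx/tx-bit-map properties that assign queues to the
+ * group (all queues land in a single group outside MQ_MG_MODE). */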
+static int gfar_parse_group(struct device_node *np,
+ struct gfar_private *priv, const char *model)
+{
+ u32 *queue_mask;
+ u64 addr, size;
+
+ addr = of_translate_address(np,
+ of_get_address(np, 0, &size, NULL));
+ priv->gfargrp[priv->num_grps].regs = ioremap(addr, size);
+
+ if (!priv->gfargrp[priv->num_grps].regs)
+ return -ENOMEM;
+
+ priv->gfargrp[priv->num_grps].interruptTransmit =
+ irq_of_parse_and_map(np, 0);
+
+ /* If we aren't the FEC we have multiple interrupts */
+ if (model && strcasecmp(model, "FEC")) {
+ priv->gfargrp[priv->num_grps].interruptReceive =
+ irq_of_parse_and_map(np, 1);
+ priv->gfargrp[priv->num_grps].interruptError =
+ irq_of_parse_and_map(np, 2);
+ if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 ||
+ priv->gfargrp[priv->num_grps].interruptReceive < 0 ||
+ priv->gfargrp[priv->num_grps].interruptError < 0) {
+ return -EINVAL;
+ }
+ }
+
+ priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
+ priv->gfargrp[priv->num_grps].priv = priv;
+ spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
+ if (priv->mode == MQ_MG_MODE) {
+ queue_mask = (u32 *)of_get_property(np,
+ "fsl,rx-bit-map", NULL);
+ priv->gfargrp[priv->num_grps].rx_bit_map =
+ queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+ queue_mask = (u32 *)of_get_property(np,
+ "fsl,tx-bit-map", NULL);
+ priv->gfargrp[priv->num_grps].tx_bit_map =
+ queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+ } else {
+ priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
+ priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
+ }
+ priv->num_grps++;
+
+ return 0;
+}
+
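+/* Parse the gianfar device-tree node: read the queue counts, allocate
+ * the multiqueue net_device, map the register group(s) and allocate
+ * the per-queue structures. */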
+static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
{
const char *model;
const char *ctype;
const void *mac_addr;
- u64 addr, size;
- int err = 0;
- struct gfar_private *priv = netdev_priv(dev);
- struct device_node *np = priv->node;
+ int err = 0, i;
+ struct net_device *dev = NULL;
+ struct gfar_private *priv = NULL;
+ struct device_node *np = ofdev->node;
+ struct device_node *child = NULL;
const u32 *stash;
const u32 *stash_len;
const u32 *stash_idx;
+ unsigned int num_tx_qs, num_rx_qs;
+ u32 *tx_queues, *rx_queues;
if (!np || !of_device_is_available(np))
return -ENODEV;
- /* get a pointer to the register memory */
- addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
- priv->regs = ioremap(addr, size);
+ /* parse the num of tx and rx queues */
+ tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
+ num_tx_qs = tx_queues ? *tx_queues : 1;
+
+ if (num_tx_qs > MAX_TX_QS) {
+ printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
+ num_tx_qs, MAX_TX_QS);
+ printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
+ return -EINVAL;
+ }
+
+ rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
+ num_rx_qs = rx_queues ? *rx_queues : 1;
- if (priv->regs == NULL)
+ if (num_rx_qs > MAX_RX_QS) {
+ printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
+ num_tx_qs, MAX_TX_QS);
+ printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
+ return -EINVAL;
+ }
+
+ *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
+ dev = *pdev;
+ if (!dev)
return -ENOMEM;
- priv->interruptTransmit = irq_of_parse_and_map(np, 0);
+ priv = netdev_priv(dev);
+ priv->node = ofdev->node;
+ priv->ndev = dev;
+
+ dev->num_tx_queues = num_tx_qs;
+ dev->real_num_tx_queues = num_tx_qs;
+ priv->num_tx_queues = num_tx_qs;
+ priv->num_rx_queues = num_rx_qs;
+ priv->num_grps = 0x0;
model = of_get_property(np, "model", NULL);
- /* If we aren't the FEC we have multiple interrupts */
- if (model && strcasecmp(model, "FEC")) {
- priv->interruptReceive = irq_of_parse_and_map(np, 1);
+ for (i = 0; i < MAXGROUPS; i++)
+ priv->gfargrp[i].regs = NULL;
+
+ /* Parse and initialize group specific information */
+ if (of_device_is_compatible(np, "fsl,etsec2")) {
+ priv->mode = MQ_MG_MODE;
+ for_each_child_of_node(np, child) {
+ err = gfar_parse_group(child, priv, model);
+ if (err)
+ goto err_grp_init;
+ }
+ } else {
+ priv->mode = SQ_SG_MODE;
+ err = gfar_parse_group(np, priv, model);
+ if (err)
+ goto err_grp_init;
+ }
- priv->interruptError = irq_of_parse_and_map(np, 2);
+ for (i = 0; i < priv->num_tx_queues; i++)
+ priv->tx_queue[i] = NULL;
+ for (i = 0; i < priv->num_rx_queues; i++)
+ priv->rx_queue[i] = NULL;
+
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ priv->tx_queue[i] = kmalloc(sizeof(struct gfar_priv_tx_q),
+ GFP_KERNEL);
+ if (!priv->tx_queue[i]) {
+ err = -ENOMEM;
+ goto tx_alloc_failed;
+ }
+ priv->tx_queue[i]->tx_skbuff = NULL;
+ priv->tx_queue[i]->qindex = i;
+ priv->tx_queue[i]->dev = dev;
+ spin_lock_init(&(priv->tx_queue[i]->txlock));
+ }
- if (priv->interruptTransmit < 0 ||
- priv->interruptReceive < 0 ||
- priv->interruptError < 0) {
- err = -EINVAL;
- goto err_out;
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ priv->rx_queue[i] = kmalloc(sizeof(struct gfar_priv_rx_q),
+ GFP_KERNEL);
+ if (!priv->rx_queue[i]) {
+ err = -ENOMEM;
+ goto rx_alloc_failed;
}
+ priv->rx_queue[i]->rx_skbuff = NULL;
+ priv->rx_queue[i]->qindex = i;
+ priv->rx_queue[i]->dev = dev;
+ spin_lock_init(&(priv->rx_queue[i]->rxlock));
}
+
stash = of_get_property(np, "bd-stash", NULL);
- if(stash) {
+ if (stash) {
priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
priv->bd_stash_en = 1;
}
@@ -270,8 +734,13 @@ static int gfar_of_init(struct net_device *dev)
return 0;
-err_out:
- iounmap(priv->regs);
+rx_alloc_failed:
+ free_rx_pointers(priv);
+tx_alloc_failed:
+ free_tx_pointers(priv);
+err_grp_init:
+ unmap_group_regs(priv);
+ free_netdev(dev);
return err;
}
@@ -289,6 +758,85 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
}
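+
+/* Mirror the low @max_qs bits of @bit_map so that queue 0 (the MSB in
+ * the device-tree encoding) ends up at bit 0, as for_each_bit expects. */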
+static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
+{
+ unsigned int new_bit_map = 0x0;
+ int mask = 0x1 << (max_qs - 1), i;
+
+ for (i = 0; i < max_qs; i++) {
+ if (bit_map & mask)
+ new_bit_map |= (1 << i);
+ mask >>= 1;
+ }
+ return new_bit_map;
+}
+
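+/* Install a four-entry filer cluster that steers one traffic class
+ * (e.g. IPv4/TCP), filling the table downwards from @rqfar and
+ * returning the next free index. */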
+static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
+ u32 class)
+{
+ u32 rqfpr = FPR_FILER_MASK;
+ u32 rqfcr = 0x0;
+
+ rqfar--;
+ rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
+ ftp_rqfpr[rqfar] = rqfpr;
+ ftp_rqfcr[rqfar] = rqfcr;
+ gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+ rqfar--;
+ rqfcr = RQFCR_CMP_NOMATCH;
+ ftp_rqfpr[rqfar] = rqfpr;
+ ftp_rqfcr[rqfar] = rqfcr;
+ gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+ rqfar--;
+ rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
+ rqfpr = class;
+ ftp_rqfcr[rqfar] = rqfcr;
+ ftp_rqfpr[rqfar] = rqfpr;
+ gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+ rqfar--;
+ rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
+ rqfpr = class;
+ ftp_rqfcr[rqfar] = rqfcr;
+ ftp_rqfpr[rqfar] = rqfpr;
+ gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+ return rqfar;
+}
+
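+/* Populate the RX filer: a default match-all rule at the top, one
+ * cluster per supported traffic class below it, and no-match rules
+ * in all remaining entries. */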
+static void gfar_init_filer_table(struct gfar_private *priv)
+{
+ int i;
+ u32 rqfar = MAX_FILER_IDX;
+ u32 rqfcr = 0x0;
+ u32 rqfpr = FPR_FILER_MASK;
+
+ /* Default rule */
+ rqfcr = RQFCR_CMP_MATCH;
+ ftp_rqfcr[rqfar] = rqfcr;
+ ftp_rqfpr[rqfar] = rqfpr;
+ gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+ rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
+ rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
+ rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
+ rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
+ rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
+ rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
+
+ /* cur_filer_idx indicates the first non-masked rule */
+ priv->cur_filer_idx = rqfar;
+
+ /* Rest are masked rules */
+ rqfcr = RQFCR_CMP_NOMATCH;
+ for (i = 0; i < rqfar; i++) {
+ ftp_rqfcr[i] = rqfcr;
+ ftp_rqfpr[i] = rqfpr;
+ gfar_write_filer(priv, i, rqfcr, rqfpr);
+ }
+}
+
/* Set up the ethernet device structure, private data,
* and anything else we need before we start */
static int gfar_probe(struct of_device *ofdev,
@@ -297,14 +845,17 @@ static int gfar_probe(struct of_device *ofdev,
u32 tempval;
struct net_device *dev = NULL;
struct gfar_private *priv = NULL;
- int err = 0;
+ struct gfar __iomem *regs = NULL;
+ int err = 0, i, grp_idx = 0;
int len_devname;
+ u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
+ u32 isrg = 0;
+ u32 __iomem *baddr;
- /* Create an ethernet device instance */
- dev = alloc_etherdev(sizeof (*priv));
+ err = gfar_of_init(ofdev, &dev);
- if (NULL == dev)
- return -ENOMEM;
+ if (err)
+ return err;
priv = netdev_priv(dev);
priv->ndev = dev;
@@ -312,50 +863,46 @@ static int gfar_probe(struct of_device *ofdev,
priv->node = ofdev->node;
SET_NETDEV_DEV(dev, &ofdev->dev);
- err = gfar_of_init(dev);
-
- if (err)
- goto regs_fail;
-
- spin_lock_init(&priv->txlock);
- spin_lock_init(&priv->rxlock);
spin_lock_init(&priv->bflock);
INIT_WORK(&priv->reset_task, gfar_reset_task);
dev_set_drvdata(&ofdev->dev, priv);
+ regs = priv->gfargrp[0].regs;
/* Stop the DMA engine now, in case it was running before */
/* (The firmware could have used it, and left it running). */
gfar_halt(dev);
/* Reset MAC layer */
- gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);
+ gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
/* We need to delay at least 3 TX clocks */
udelay(2);
tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
- gfar_write(&priv->regs->maccfg1, tempval);
+ gfar_write(&regs->maccfg1, tempval);
/* Initialize MACCFG2. */
- gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);
+ gfar_write(&regs->maccfg2, MACCFG2_INIT_SETTINGS);
/* Initialize ECNTRL */
- gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);
+ gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
/* Set the dev->base_addr to the gfar reg region */
- dev->base_addr = (unsigned long) (priv->regs);
+ dev->base_addr = (unsigned long) regs;
SET_NETDEV_DEV(dev, &ofdev->dev);
/* Fill in the dev structure */
dev->watchdog_timeo = TX_TIMEOUT;
- netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
dev->mtu = 1500;
-
dev->netdev_ops = &gfar_netdev_ops;
dev->ethtool_ops = &gfar_ethtool_ops;
+ /* Register NAPI for each group */
+ for (i = 0; i < priv->num_grps; i++)
+ netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);
+
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
priv->rx_csum_enable = 1;
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
@@ -371,35 +918,35 @@ static int gfar_probe(struct of_device *ofdev,
priv->extended_hash = 1;
priv->hash_width = 9;
- priv->hash_regs[0] = &priv->regs->igaddr0;
- priv->hash_regs[1] = &priv->regs->igaddr1;
- priv->hash_regs[2] = &priv->regs->igaddr2;
- priv->hash_regs[3] = &priv->regs->igaddr3;
- priv->hash_regs[4] = &priv->regs->igaddr4;
- priv->hash_regs[5] = &priv->regs->igaddr5;
- priv->hash_regs[6] = &priv->regs->igaddr6;
- priv->hash_regs[7] = &priv->regs->igaddr7;
- priv->hash_regs[8] = &priv->regs->gaddr0;
- priv->hash_regs[9] = &priv->regs->gaddr1;
- priv->hash_regs[10] = &priv->regs->gaddr2;
- priv->hash_regs[11] = &priv->regs->gaddr3;
- priv->hash_regs[12] = &priv->regs->gaddr4;
- priv->hash_regs[13] = &priv->regs->gaddr5;
- priv->hash_regs[14] = &priv->regs->gaddr6;
- priv->hash_regs[15] = &priv->regs->gaddr7;
+ priv->hash_regs[0] = &regs->igaddr0;
+ priv->hash_regs[1] = &regs->igaddr1;
+ priv->hash_regs[2] = &regs->igaddr2;
+ priv->hash_regs[3] = &regs->igaddr3;
+ priv->hash_regs[4] = &regs->igaddr4;
+ priv->hash_regs[5] = &regs->igaddr5;
+ priv->hash_regs[6] = &regs->igaddr6;
+ priv->hash_regs[7] = &regs->igaddr7;
+ priv->hash_regs[8] = &regs->gaddr0;
+ priv->hash_regs[9] = &regs->gaddr1;
+ priv->hash_regs[10] = &regs->gaddr2;
+ priv->hash_regs[11] = &regs->gaddr3;
+ priv->hash_regs[12] = &regs->gaddr4;
+ priv->hash_regs[13] = &regs->gaddr5;
+ priv->hash_regs[14] = &regs->gaddr6;
+ priv->hash_regs[15] = &regs->gaddr7;
} else {
priv->extended_hash = 0;
priv->hash_width = 8;
- priv->hash_regs[0] = &priv->regs->gaddr0;
- priv->hash_regs[1] = &priv->regs->gaddr1;
- priv->hash_regs[2] = &priv->regs->gaddr2;
- priv->hash_regs[3] = &priv->regs->gaddr3;
- priv->hash_regs[4] = &priv->regs->gaddr4;
- priv->hash_regs[5] = &priv->regs->gaddr5;
- priv->hash_regs[6] = &priv->regs->gaddr6;
- priv->hash_regs[7] = &priv->regs->gaddr7;
+ priv->hash_regs[0] = &regs->gaddr0;
+ priv->hash_regs[1] = &regs->gaddr1;
+ priv->hash_regs[2] = &regs->gaddr2;
+ priv->hash_regs[3] = &regs->gaddr3;
+ priv->hash_regs[4] = &regs->gaddr4;
+ priv->hash_regs[5] = &regs->gaddr5;
+ priv->hash_regs[6] = &regs->gaddr6;
+ priv->hash_regs[7] = &regs->gaddr7;
}
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
@@ -410,15 +957,70 @@ static int gfar_probe(struct of_device *ofdev,
if (dev->features & NETIF_F_IP_CSUM)
dev->hard_header_len += GMAC_FCB_LEN;
+ /* Program the isrg regs only if number of grps > 1 */
+ if (priv->num_grps > 1) {
+ baddr = &regs->isrg0;
+ for (i = 0; i < priv->num_grps; i++) {
+ isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
+ isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
+ gfar_write(baddr, isrg);
+ baddr++;
+ isrg = 0x0;
+ }
+ }
+
+ /* Need to reverse the bit maps as bit_map's MSB is q0, but
+ * for_each_bit() parses from right to left, which effectively
+ * reverses the queue numbers */
+ for (i = 0; i < priv->num_grps; i++) {
+ priv->gfargrp[i].tx_bit_map = reverse_bitmap(
+ priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
+ priv->gfargrp[i].rx_bit_map = reverse_bitmap(
+ priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
+ }
+
+ /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
+ * also assign queues to groups */
+ for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
+ priv->gfargrp[grp_idx].num_rx_queues = 0x0;
+ for_each_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
+ priv->num_rx_queues) {
+ priv->gfargrp[grp_idx].num_rx_queues++;
+ priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
+ rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
+ rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
+ }
+ priv->gfargrp[grp_idx].num_tx_queues = 0x0;
+ for_each_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
+ priv->num_tx_queues) {
+ priv->gfargrp[grp_idx].num_tx_queues++;
+ priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
+ tstat = tstat | (TSTAT_CLEAR_THALT >> i);
+ tqueue = tqueue | (TQUEUE_EN0 >> i);
+ }
+ priv->gfargrp[grp_idx].rstat = rstat;
+ priv->gfargrp[grp_idx].tstat = tstat;
+ rstat = tstat = 0;
+ }
+
+ gfar_write(&regs->rqueue, rqueue);
+ gfar_write(&regs->tqueue, tqueue);
+
priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
- priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
- priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
- priv->num_txbdfree = DEFAULT_TX_RING_SIZE;
- priv->txcoalescing = DEFAULT_TX_COALESCE;
- priv->txic = DEFAULT_TXIC;
- priv->rxcoalescing = DEFAULT_RX_COALESCE;
- priv->rxic = DEFAULT_RXIC;
+ /* Initialize some of the rx/tx queue-level parameters */
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
+ priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
+ priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
+ priv->tx_queue[i]->txic = DEFAULT_TXIC;
+ }
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
+ priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
+ priv->rx_queue[i]->rxic = DEFAULT_RXIC;
+ }
/* Enable most messages by default */
priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
@@ -439,20 +1041,43 @@ static int gfar_probe(struct of_device *ofdev,
/* fill out IRQ number and name fields */
len_devname = strlen(dev->name);
- strncpy(&priv->int_name_tx[0], dev->name, len_devname);
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
- strncpy(&priv->int_name_tx[len_devname],
- "_tx", sizeof("_tx") + 1);
-
- strncpy(&priv->int_name_rx[0], dev->name, len_devname);
- strncpy(&priv->int_name_rx[len_devname],
- "_rx", sizeof("_rx") + 1);
+ for (i = 0; i < priv->num_grps; i++) {
+ strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
+ len_devname);
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+ strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
+ "_g", sizeof("_g"));
+ priv->gfargrp[i].int_name_tx[
+ strlen(priv->gfargrp[i].int_name_tx)] = '0' + i;
+ strncpy(&priv->gfargrp[i].int_name_tx[strlen(
+ priv->gfargrp[i].int_name_tx)],
+ "_tx", sizeof("_tx") + 1);
+
+ strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
+ len_devname);
+ strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
+ "_g", sizeof("_g"));
+ priv->gfargrp[i].int_name_rx[
+ strlen(priv->gfargrp[i].int_name_rx)] = '0' + i;
+ strncpy(&priv->gfargrp[i].int_name_rx[strlen(
+ priv->gfargrp[i].int_name_rx)],
+ "_rx", sizeof("_rx") + 1);
+
+ strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
+ len_devname);
+ strncpy(&priv->gfargrp[i].int_name_er[len_devname],
+ "_g", sizeof("_g"));
+ priv->gfargrp[i].int_name_er[strlen(
+ priv->gfargrp[i].int_name_er)] = '0' + i;
+ strncpy(&priv->gfargrp[i].int_name_er[strlen(
+ priv->gfargrp[i].int_name_er)],
+ "_er", sizeof("_er") + 1);
+ } else
+ priv->gfargrp[i].int_name_tx[len_devname] = '\0';
+ }
- strncpy(&priv->int_name_er[0], dev->name, len_devname);
- strncpy(&priv->int_name_er[len_devname],
- "_er", sizeof("_er") + 1);
- } else
- priv->int_name_tx[len_devname] = '\0';
+ /* Initialize the filer table */
+ gfar_init_filer_table(priv);
/* Create all the sysfs files */
gfar_init_sysfs(dev);
@@ -463,14 +1088,19 @@ static int gfar_probe(struct of_device *ofdev,
/* Even more device info helps when determining which kernel */
/* provided which set of benchmarks. */
printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
- printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
- dev->name, priv->rx_ring_size, priv->tx_ring_size);
+ for (i = 0; i < priv->num_rx_queues; i++)
+ printk(KERN_INFO "%s: :RX BD ring size for Q[%d]: %d\n",
+ dev->name, i, priv->rx_queue[i]->rx_ring_size);
+ for(i = 0; i < priv->num_tx_queues; i++)
+ printk(KERN_INFO "%s:TX BD ring size for Q[%d]: %d\n",
+ dev->name, i, priv->tx_queue[i]->tx_ring_size);
return 0;
register_fail:
- iounmap(priv->regs);
-regs_fail:
+ unmap_group_regs(priv);
+ free_tx_pointers(priv);
+ free_rx_pointers(priv);
if (priv->phy_node)
of_node_put(priv->phy_node);
if (priv->tbi_node)
@@ -491,54 +1121,59 @@ static int gfar_remove(struct of_device *ofdev)
dev_set_drvdata(&ofdev->dev, NULL);
unregister_netdev(priv->ndev);
- iounmap(priv->regs);
+ unmap_group_regs(priv);
free_netdev(priv->ndev);
return 0;
}
#ifdef CONFIG_PM
-static int gfar_suspend(struct of_device *ofdev, pm_message_t state)
+
+static int gfar_suspend(struct device *dev)
{
- struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
- struct net_device *dev = priv->ndev;
+ struct gfar_private *priv = dev_get_drvdata(dev);
+ struct net_device *ndev = priv->ndev;
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
unsigned long flags;
u32 tempval;
int magic_packet = priv->wol_en &&
(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
- netif_device_detach(dev);
+ netif_device_detach(ndev);
- if (netif_running(dev)) {
- spin_lock_irqsave(&priv->txlock, flags);
- spin_lock(&priv->rxlock);
+ if (netif_running(ndev)) {
- gfar_halt_nodisable(dev);
+ local_irq_save(flags);
+ lock_tx_qs(priv);
+ lock_rx_qs(priv);
+
+ gfar_halt_nodisable(ndev);
/* Disable Tx, and Rx if wake-on-LAN is disabled. */
- tempval = gfar_read(&priv->regs->maccfg1);
+ tempval = gfar_read(&regs->maccfg1);
tempval &= ~MACCFG1_TX_EN;
if (!magic_packet)
tempval &= ~MACCFG1_RX_EN;
- gfar_write(&priv->regs->maccfg1, tempval);
+ gfar_write(&regs->maccfg1, tempval);
- spin_unlock(&priv->rxlock);
- spin_unlock_irqrestore(&priv->txlock, flags);
+ unlock_rx_qs(priv);
+ unlock_tx_qs(priv);
+ local_irq_restore(flags);
- napi_disable(&priv->napi);
+ disable_napi(priv);
if (magic_packet) {
/* Enable interrupt on Magic Packet */
- gfar_write(&priv->regs->imask, IMASK_MAG);
+ gfar_write(&regs->imask, IMASK_MAG);
/* Enable Magic Packet mode */
- tempval = gfar_read(&priv->regs->maccfg2);
+ tempval = gfar_read(&regs->maccfg2);
tempval |= MACCFG2_MPEN;
- gfar_write(&priv->regs->maccfg2, tempval);
+ gfar_write(&regs->maccfg2, tempval);
} else {
phy_stop(priv->phydev);
}
@@ -547,17 +1182,18 @@ static int gfar_suspend(struct of_device *ofdev, pm_message_t state)
return 0;
}
-static int gfar_resume(struct of_device *ofdev)
+static int gfar_resume(struct device *dev)
{
- struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
- struct net_device *dev = priv->ndev;
+ struct gfar_private *priv = dev_get_drvdata(dev);
+ struct net_device *ndev = priv->ndev;
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
unsigned long flags;
u32 tempval;
int magic_packet = priv->wol_en &&
(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
- if (!netif_running(dev)) {
- netif_device_attach(dev);
+ if (!netif_running(ndev)) {
+ netif_device_attach(ndev);
return 0;
}
@@ -567,28 +1203,80 @@ static int gfar_resume(struct of_device *ofdev)
/* Disable Magic Packet mode, in case something
* else woke us up.
*/
+ local_irq_save(flags);
+ lock_tx_qs(priv);
+ lock_rx_qs(priv);