| field | value | detail |
|---|---|---|
| author | Steve French <sfrench@us.ibm.com> | 2005-10-31 08:36:11 -0800 |
| committer | Steve French <sfrench@us.ibm.com> | 2005-10-31 08:36:11 -0800 |
| commit | 53b2ec5518aa2623e8c0cb36f1c304a797988a46 | (patch) |
| tree | 465d8631ade6c2fcbd7576ff9813d00116c6a1e8 | /drivers/net/ibm_emac/ibm_emac_mal.c |
| parent | 0753ca7bc2b876dd136e9db11a20f85cbe4e08b1 | (diff) |
| parent | 581c1b14394aee60aff46ea67d05483261ed6527 | (diff) |
Merge with /pub/scm/linux/kernel/git/torvalds/linux-2.6.git
Diffstat (limited to 'drivers/net/ibm_emac/ibm_emac_mal.c')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/net/ibm_emac/ibm_emac_mal.c | 674 |

1 file changed, 397 insertions, 277 deletions
```diff
diff --git a/drivers/net/ibm_emac/ibm_emac_mal.c b/drivers/net/ibm_emac/ibm_emac_mal.c
index e59f57f363c..da88d43081c 100644
--- a/drivers/net/ibm_emac/ibm_emac_mal.c
+++ b/drivers/net/ibm_emac/ibm_emac_mal.c
@@ -1,436 +1,565 @@
 /*
- * ibm_ocp_mal.c
+ * drivers/net/ibm_emac/ibm_emac_mal.c
  *
- *      Armin Kuster akuster@mvista.com
- *      Juen, 2002
+ * Memory Access Layer (MAL) support
+ *
+ * Copyright (c) 2004, 2005 Zultys Technologies.
+ * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
  *
- * Copyright 2002 MontaVista Softare Inc.
+ * Based on original work by
+ *      Benjamin Herrenschmidt <benh@kernel.crashing.org>,
+ *      David Gibson <hermes@gibson.dropbear.id.au>,
+ *
+ *      Armin Kuster <akuster@mvista.com>
+ *      Copyright 2002 MontaVista Softare Inc.
  *
  * This program is free software; you can redistribute  it and/or modify it
  * under  the terms of  the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
+ *
  */
-
 #include <linux/config.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/netdevice.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/dma-mapping.h>
 
-#include <asm/io.h>
-#include <asm/irq.h>
 #include <asm/ocp.h>
 
+#include "ibm_emac_core.h"
 #include "ibm_emac_mal.h"
+#include "ibm_emac_debug.h"
 
-// Locking: Should we share a lock with the client ? The client could provide
-// a lock pointer (optionally) in the commac structure... I don't think this is
-// really necessary though
-
-/* This lock protects the commac list. On today UP implementations, it's
- * really only used as IRQ protection in mal_{register,unregister}_commac()
- */
-static DEFINE_RWLOCK(mal_list_lock);
-
-int mal_register_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac)
+int __init mal_register_commac(struct ibm_ocp_mal *mal,
+			       struct mal_commac *commac)
 {
 	unsigned long flags;
+	local_irq_save(flags);
 
-	write_lock_irqsave(&mal_list_lock, flags);
+	MAL_DBG("%d: reg(%08x, %08x)" NL, mal->def->index,
+		commac->tx_chan_mask, commac->rx_chan_mask);
 
-	/* Don't let multiple commacs claim the same channel */
+	/* Don't let multiple commacs claim the same channel(s) */
 	if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
 	    (mal->rx_chan_mask & commac->rx_chan_mask)) {
-		write_unlock_irqrestore(&mal_list_lock, flags);
+		local_irq_restore(flags);
+		printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
+		       mal->def->index);
 		return -EBUSY;
 	}
 
 	mal->tx_chan_mask |= commac->tx_chan_mask;
 	mal->rx_chan_mask |= commac->rx_chan_mask;
+	list_add(&commac->list, &mal->list);
 
-	list_add(&commac->list, &mal->commac);
-
-	write_unlock_irqrestore(&mal_list_lock, flags);
-
+	local_irq_restore(flags);
 	return 0;
 }
 
-int mal_unregister_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac)
+void __exit mal_unregister_commac(struct ibm_ocp_mal *mal,
+				  struct mal_commac *commac)
 {
 	unsigned long flags;
+	local_irq_save(flags);
 
-	write_lock_irqsave(&mal_list_lock, flags);
+	MAL_DBG("%d: unreg(%08x, %08x)" NL, mal->def->index,
+		commac->tx_chan_mask, commac->rx_chan_mask);
 
 	mal->tx_chan_mask &= ~commac->tx_chan_mask;
 	mal->rx_chan_mask &= ~commac->rx_chan_mask;
-
 	list_del_init(&commac->list);
 
-	write_unlock_irqrestore(&mal_list_lock, flags);
-
-	return 0;
+	local_irq_restore(flags);
 }
 
 int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, unsigned long size)
 {
-	switch (channel) {
-	case 0:
-		set_mal_dcrn(mal, DCRN_MALRCBS0, size);
-		break;
-#ifdef DCRN_MALRCBS1
-	case 1:
-		set_mal_dcrn(mal, DCRN_MALRCBS1, size);
-		break;
-#endif
-#ifdef DCRN_MALRCBS2
-	case 2:
-		set_mal_dcrn(mal, DCRN_MALRCBS2, size);
-		break;
-#endif
-#ifdef DCRN_MALRCBS3
-	case 3:
-		set_mal_dcrn(mal, DCRN_MALRCBS3, size);
-		break;
-#endif
-	default:
+	struct ocp_func_mal_data *maldata = mal->def->additions;
+	BUG_ON(channel < 0 || channel >= maldata->num_rx_chans ||
+	       size > MAL_MAX_RX_SIZE);
+
+	MAL_DBG("%d: set_rbcs(%d, %lu)" NL, mal->def->index, channel, size);
+
+	if (size & 0xf) {
+		printk(KERN_WARNING
+		       "mal%d: incorrect RX size %lu for the channel %d\n",
+		       mal->def->index, size, channel);
 		return -EINVAL;
 	}
 
+	set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4);
 	return 0;
 }
 
-static irqreturn_t mal_serr(int irq, void *dev_instance, struct pt_regs *regs)
+int mal_tx_bd_offset(struct ibm_ocp_mal *mal, int channel)
 {
-	struct ibm_ocp_mal *mal = dev_instance;
-	unsigned long mal_error;
+	struct ocp_func_mal_data *maldata = mal->def->additions;
+	BUG_ON(channel < 0 || channel >= maldata->num_tx_chans);
+	return channel * NUM_TX_BUFF;
+}
 
-	/*
-	 * This SERR applies to one of the devices on the MAL, here we charge
-	 * it against the first EMAC registered for the MAL.
-	 */
+int mal_rx_bd_offset(struct ibm_ocp_mal *mal, int channel)
+{
+	struct ocp_func_mal_data *maldata = mal->def->additions;
+	BUG_ON(channel < 0 || channel >= maldata->num_rx_chans);
+	return maldata->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF;
+}
 
-	mal_error = get_mal_dcrn(mal, DCRN_MALESR);
+void mal_enable_tx_channel(struct ibm_ocp_mal *mal, int channel)
+{
+	local_bh_disable();
+	MAL_DBG("%d: enable_tx(%d)" NL, mal->def->index, channel);
+	set_mal_dcrn(mal, MAL_TXCASR,
+		     get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));
+	local_bh_enable();
+}
 
-	printk(KERN_ERR "%s: System Error (MALESR=%lx)\n",
-	       "MAL" /* FIXME: get the name right */ , mal_error);
+void mal_disable_tx_channel(struct ibm_ocp_mal *mal, int channel)
+{
+	set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));
+	MAL_DBG("%d: disable_tx(%d)" NL, mal->def->index, channel);
+}
 
-	/* FIXME: decipher error */
-	/* DIXME: distribute to commacs, if possible */
+void mal_enable_rx_channel(struct ibm_ocp_mal *mal, int channel)
+{
+	local_bh_disable();
+	MAL_DBG("%d: enable_rx(%d)" NL, mal->def->index, channel);
+	set_mal_dcrn(mal, MAL_RXCASR,
+		     get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));
+	local_bh_enable();
+}
 
-	/* Clear the error status register */
-	set_mal_dcrn(mal, DCRN_MALESR, mal_error);
+void mal_disable_rx_channel(struct ibm_ocp_mal *mal, int channel)
+{
+	set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));
+	MAL_DBG("%d: disable_rx(%d)" NL, mal->def->index, channel);
+}
 
-	return IRQ_HANDLED;
+void mal_poll_add(struct ibm_ocp_mal *mal, struct mal_commac *commac)
+{
+	local_bh_disable();
+	MAL_DBG("%d: poll_add(%p)" NL, mal->def->index, commac);
+	list_add_tail(&commac->poll_list, &mal->poll_list);
+	local_bh_enable();
 }
 
-static irqreturn_t mal_txeob(int irq, void *dev_instance, struct pt_regs *regs)
+void mal_poll_del(struct ibm_ocp_mal *mal, struct mal_commac *commac)
+{
+	local_bh_disable();
+	MAL_DBG("%d: poll_del(%p)" NL, mal->def->index, commac);
+	list_del(&commac->poll_list);
+	local_bh_enable();
+}
+
+/* synchronized by mal_poll() */
+static inline void mal_enable_eob_irq(struct ibm_ocp_mal *mal)
+{
+	MAL_DBG2("%d: enable_irq" NL, mal->def->index);
+	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
+}
+
+/* synchronized by __LINK_STATE_RX_SCHED bit in ndev->state */
+static inline void mal_disable_eob_irq(struct ibm_ocp_mal *mal)
+{
+	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE);
+	MAL_DBG2("%d: disable_irq" NL, mal->def->index);
+}
+
+static irqreturn_t mal_serr(int irq, void *dev_instance, struct pt_regs *regs)
 {
 	struct ibm_ocp_mal *mal = dev_instance;
-	struct list_head *l;
-	unsigned long isr;
+	u32 esr = get_mal_dcrn(mal, MAL_ESR);
 
-	isr = get_mal_dcrn(mal, DCRN_MALTXEOBISR);
-	set_mal_dcrn(mal, DCRN_MALTXEOBISR, isr);
+	/* Clear the error status register */
+	set_mal_dcrn(mal, MAL_ESR, esr);
 
-	read_lock(&mal_list_lock);
-	list_for_each(l, &mal->commac) {
-		struct mal_commac *mc = list_entry(l, struct mal_commac, list);
+	MAL_DBG("%d: SERR %08x" NL, mal->def->index, esr);
 
-		if (isr & mc->tx_chan_mask) {
-			mc->ops->txeob(mc->dev, isr & mc->tx_chan_mask);
+	if (esr & MAL_ESR_EVB) {
+		if (esr & MAL_ESR_DE) {
+			/* We ignore Descriptor error,
+			 * TXDE or RXDE interrupt will be generated anyway.
+			 */
+			return IRQ_HANDLED;
 		}
+
+		if (esr & MAL_ESR_PEIN) {
+			/* PLB error, it's probably buggy hardware or
+			 * incorrect physical address in BD (i.e. bug)
+			 */
+			if (net_ratelimit())
+				printk(KERN_ERR
+				       "mal%d: system error, PLB (ESR = 0x%08x)\n",
+				       mal->def->index, esr);
+			return IRQ_HANDLED;
+		}
+
+		/* OPB error, it's probably buggy hardware or incorrect EBC setup */
+		if (net_ratelimit())
+			printk(KERN_ERR
+			       "mal%d: system error, OPB (ESR = 0x%08x)\n",
+			       mal->def->index, esr);
 	}
-	read_unlock(&mal_list_lock);
+	return IRQ_HANDLED;
+}
 
+static inline void mal_schedule_poll(struct ibm_ocp_mal *mal)
+{
+	if (likely(netif_rx_schedule_prep(&mal->poll_dev))) {
+		MAL_DBG2("%d: schedule_poll" NL, mal->def->index);
+		mal_disable_eob_irq(mal);
+		__netif_rx_schedule(&mal->poll_dev);
+	} else
+		MAL_DBG2("%d: already in poll" NL, mal->def->index);
+}
+
+static irqreturn_t mal_txeob(int irq, void *dev_instance, struct pt_regs *regs)
+{
+	struct ibm_ocp_mal *mal = dev_instance;
+	u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);
+	MAL_DBG2("%d: txeob %08x" NL, mal->def->index, r);
+	mal_schedule_poll(mal);
+	set_mal_dcrn(mal, MAL_TXEOBISR, r);
 	return IRQ_HANDLED;
 }
 
 static irqreturn_t mal_rxeob(int irq, void *dev_instance, struct pt_regs *regs)
 {
 	struct ibm_ocp_mal *mal = dev_instance;
-	struct list_head *l;
-	unsigned long isr;
+	u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);
+	MAL_DBG2("%d: rxeob %08x" NL, mal->def->index, r);
+	mal_schedule_poll(mal);
+	set_mal_dcrn(mal, MAL_RXEOBISR, r);
+	return IRQ_HANDLED;
+}
 
-	isr = get_mal_dcrn(mal, DCRN_MALRXEOBISR);
-	set_mal_dcrn(mal, DCRN_MALRXEOBISR, isr);
+static irqreturn_t mal_txde(int irq, void *dev_instance, struct pt_regs *regs)
+{
+	struct ibm_ocp_mal *mal = dev_instance;
+	u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
+	set_mal_dcrn(mal, MAL_TXDEIR, deir);
 
-	read_lock(&mal_list_lock);
-	list_for_each(l, &mal->commac) {
-		struct mal_commac *mc = list_entry(l, struct mal_commac, list);
+	MAL_DBG("%d: txde %08x" NL, mal->def->index, deir);
 
-		if (isr & mc->rx_chan_mask) {
-			mc->ops->rxeob(mc->dev, isr & mc->rx_chan_mask);
-		}
-	}
-	read_unlock(&mal_list_lock);
+	if (net_ratelimit())
+		printk(KERN_ERR
+		       "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
+		       mal->def->index, deir);
 
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t mal_txde(int irq, void *dev_instance, struct pt_regs *regs)
+static irqreturn_t mal_rxde(int irq, void *dev_instance, struct pt_regs *regs)
 {
 	struct ibm_ocp_mal *mal = dev_instance;
 	struct list_head *l;
-	unsigned long deir;
+	u32 deir = get_mal_dcrn(mal, MAL_RXDEIR);
 
-	deir = get_mal_dcrn(mal, DCRN_MALTXDEIR);
+	MAL_DBG("%d: rxde %08x" NL, mal->def->index, deir);
 
-	/* FIXME: print which MAL correctly */
-	printk(KERN_WARNING "%s: Tx descriptor error (MALTXDEIR=%lx)\n",
-	       "MAL", deir);
-
-	read_lock(&mal_list_lock);
-	list_for_each(l, &mal->commac) {
+	list_for_each(l, &mal->list) {
 		struct mal_commac *mc = list_entry(l, struct mal_commac, list);
-
-		if (deir & mc->tx_chan_mask) {
-			mc->ops->txde(mc->dev, deir & mc->tx_chan_mask);
+		if (deir & mc->rx_chan_mask) {
+			mc->rx_stopped = 1;
+			mc->ops->rxde(mc->dev);
 		}
 	}
-	read_unlock(&mal_list_lock);
+
+	mal_schedule_poll(mal);
+	set_mal_dcrn(mal, MAL_RXDEIR, deir);
 
 	return IRQ_HANDLED;
 }
 
-/*
- * This interrupt should be very rare at best.  This occurs when
- * the hardware has a problem with the receive descriptors.  The manual
- * states that it occurs when the hardware cannot the receive descriptor
- * empty bit is not set.  The recovery mechanism will be to
- * traverse through the descriptors, handle any that are marked to be
- * handled and reinitialize each along the way.  At that point the driver
- * will be restarted.
- */
-static irqreturn_t mal_rxde(int irq, void *dev_instance, struct pt_regs *regs)
+static int mal_poll(struct net_device *ndev, int *budget)
 {
-	struct ibm_ocp_mal *mal = dev_instance;
+	struct ibm_ocp_mal *mal = ndev->priv;
 	struct list_head *l;
-	unsigned long deir;
-
-	deir = get_mal_dcrn(mal, DCRN_MALRXDEIR);
+	int rx_work_limit = min(ndev->quota, *budget), received = 0, done;
+
+	MAL_DBG2("%d: poll(%d) %d ->" NL, mal->def->index, *budget,
+		 rx_work_limit);
+      again:
+	/* Process TX skbs */
+	list_for_each(l, &mal->poll_list) {
+		struct mal_commac *mc =
+		    list_entry(l, struct mal_commac, poll_list);
+		mc->ops->poll_tx(mc->dev);
+	}
 
-	/*
-	 * This really is needed.  This case encountered in stress testing.
+	/* Process RX skbs.
+	 * We _might_ need something more smart here to enforce polling fairness.
 	 */
-	if (deir == 0)
-		return IRQ_HANDLED;
-
-	/* FIXME: print which MAL correctly */
-	printk(KERN_WARNING "%s: Rx descriptor error (MALRXDEIR=%lx)\n",
-	       "MAL", deir);
-
-	read_lock(&mal_list_lock);
-	list_for_each(l, &mal->commac) {
-		struct mal_commac *mc = list_entry(l, struct mal_commac, list);
+	list_for_each(l, &mal->poll_list) {
+		struct mal_commac *mc =
+		    list_entry(l, struct mal_commac, poll_list);
+		int n = mc->ops->poll_rx(mc->dev, rx_work_limit);
+		if (n) {
+			received += n;
+			rx_work_limit -= n;
+			if (rx_work_limit <= 0) {
+				done = 0;
+				goto more_work;	// XXX What if this is the last one ?
+			}
+		}
+	}
 
-		if (deir & mc->rx_chan_mask) {
-			mc->ops->rxde(mc->dev, deir & mc->rx_chan_mask);
+	/* We need to disable IRQs to protect from RXDE IRQ here */
+	local_irq_disable();
+	__netif_rx_complete(ndev);
+	mal_enable_eob_irq(mal);
+	local_irq_enable();
+
+	done = 1;
+
+	/* Check for "rotting" packet(s) */
+	list_for_each(l, &mal->poll_list) {
+		struct mal_commac *mc =
+		    list_entry(l, struct mal_commac, poll_list);
+		if (unlikely(mc->ops->peek_rx(mc->dev) || mc->rx_stopped)) {
+			MAL_DBG2("%d: rotting packet" NL, mal->def->index);
+			if (netif_rx_reschedule(ndev, received))
+				mal_disable_eob_irq(mal);
+			else
+				MAL_DBG2("%d: already in poll list" NL,
+					 mal->def->index);
+
+			if (rx_work_limit > 0)
+				goto again;
+			else
+				goto more_work;
 		}
+		mc->ops->poll_tx(mc->dev);
 	}
-	read_unlock(&mal_list_lock);
 
-	return IRQ_HANDLED;
+      more_work:
+	ndev->quota -= received;
+	*budget -= received;
+
+	MAL_DBG2("%d: poll() %d <- %d" NL, mal->def->index, *budget,
+		 done ? 0 : 1);
+	return done ? 0 : 1;
+}
+
+static void mal_reset(struct ibm_ocp_mal *mal)
+{
+	int n = 10;
+	MAL_DBG("%d: reset" NL, mal->def->index);
+
+	set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR);
+
+	/* Wait for reset to complete (1 system clock) */
+	while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n)
+		--n;
+
+	if (unlikely(!n))
+		printk(KERN_ERR "mal%d: reset timeout\n", mal->def->index);
+}
+
+int mal_get_regs_len(struct ibm_ocp_mal *mal)
+{
+	return sizeof(struct emac_ethtool_regs_subhdr) +
+	    sizeof(struct ibm_mal_regs);
+}
+
+void *mal_dump_regs(struct ibm_ocp_mal *mal, void *buf)
+{
+	struct emac_ethtool_regs_subhdr *hdr = buf;
+	struct ibm_mal_regs *regs = (struct ibm_mal_regs *)(hdr + 1);
+	struct ocp_func_mal_data *maldata = mal->def->additions;
+	int i;
+
+	hdr->version = MAL_VERSION;
+	hdr->index = mal->def->index;
+
+	regs->tx_count = maldata->num_tx_chans;
+	regs->rx_count = maldata->num_rx_chans;
+
+	regs->cfg = get_mal_dcrn(mal, MAL_CFG);
+	regs->esr = get_mal_dcrn(mal, MAL_ESR);
+	regs->ier = get_mal_dcrn(mal, MAL_IER);
+	regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR);
+	regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR);
+	regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR);
+	regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR);
+	regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR);
+	regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR);
+	regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR);
+	regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR);
+
+	for (i = 0; i < regs->tx_count; ++i)
+		regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i));
+
+	for (i = 0; i < regs->rx_count; ++i) {
+		regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i));
+		regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i));
+	}
+	return regs + 1;
 }
 
 static int __init mal_probe(struct ocp_device *ocpdev)
 {
-	struct ibm_ocp_mal *mal = NULL;
+	struct ibm_ocp_mal *mal;
 	struct ocp_func_mal_data *maldata;
-	int err = 0;
+	int err = 0, i, bd_size;
+
+	MAL_DBG("%d: probe" NL, ocpdev->def->index);
 
-	maldata = (struct ocp_func_mal_data *)ocpdev->def->additions;
+	maldata = ocpdev->def->additions;
 	if (maldata == NULL) {
-		printk(KERN_ERR "mal%d: Missing additional datas !\n",
+		printk(KERN_ERR "mal%d: missing additional data!\n",
 		       ocpdev->def->index);
 		return -ENODEV;
 	}
 
-	mal = kmalloc(sizeof(struct ibm_ocp_mal), GFP_KERNEL);
-	if (mal == NULL) {
+	mal = kzalloc(sizeof(struct ibm_ocp_mal), GFP_KERNEL);
+	if (!mal) {
 		printk(KERN_ERR
-		       "mal%d: Out of memory allocating MAL structure !\n",
+		       "mal%d: out of memory allocating MAL structure!\n",
 		       ocpdev->def->index);
 		return -ENOMEM;
 	}
-	memset(mal, 0, sizeof(*mal));
-
-	switch (ocpdev->def->index) {
-	case 0:
-		mal->dcrbase = DCRN_MAL_BASE;
-		break;
-#ifdef DCRN_MAL1_BASE
-	case 1:
-		mal->dcrbase = DCRN_MAL1_BASE;
-		break;
-#endif
-	default:
-		BUG();
-	}
-
-	/**************************/
+	mal->dcrbase = maldata->dcr_base;
+	mal->def = ocpdev->def;
 
-	INIT_LIST_HEAD(&mal->commac);
+	INIT_LIST_HEAD(&mal->poll_list);
+	set_bit(__LINK_STATE_START, &mal->poll_dev.state);
+	mal->poll_dev.weight = CONFIG_IBM_EMAC_POLL_WEIGHT;
+	mal->poll_dev.poll = mal_poll;
+	mal->poll_dev.priv = mal;
+	atomic_set(&mal->poll_dev.refcnt, 1);
 
-	set_mal_dcrn(mal, DCRN_MALRXCARR, 0xFFFFFFFF);
-	set_mal_dcrn(mal, DCRN_MALTXCARR, 0xFFFFFFFF);
+	INIT_LIST_HEAD(&mal->list);
 
-	set_mal_dcrn(mal, DCRN_MALCR, MALCR_MMSR);	/* 384 */
-	/* FIXME: Add delay */
+	/* Load power-on reset defaults */
+	mal_reset(mal);
 
 	/* Set the MAL configuration register */
-	set_mal_dcrn(mal, DCRN_MALCR,
-		     MALCR_PLBB | MALCR_OPBBL | MALCR_LEA |
-		     MALCR_PLBLT_DEFAULT);
-
-	/* It would be nice to allocate buffers separately for each
-	 * channel, but we can't because the channels share the upper
-	 * 13 bits of address lines.  Each channels buffer must also
-	 * be 4k aligned, so we allocate 4k for each channel.  This is
-	 * inefficient FIXME: do better, if possible */
-	mal->tx_virt_addr = dma_alloc_coherent(&ocpdev->dev,
-					       MAL_DT_ALIGN *
-					       maldata->num_tx_chans,
-					       &mal->tx_phys_addr, GFP_KERNEL);
-	if (mal->tx_virt_addr == NULL) {
+	set_mal_dcrn(mal, MAL_CFG, MAL_CFG_DEFAULT | MAL_CFG_PLBB |
+		     MAL_CFG_OPBBL | MAL_CFG_LEA);
+
+	mal_enable_eob_irq(mal);
+
+	/* Allocate space for BD rings */
+	BUG_ON(maldata->num_tx_chans <= 0 || maldata->num_tx_chans > 32);
+	BUG_ON(maldata->num_rx_chans <= 0 || maldata->num_rx_chans > 32);
+	bd_size = sizeof(struct mal_descriptor) *
+	    (NUM_TX_BUFF * maldata->num_tx_chans +
+	     NUM_RX_BUFF * maldata->num_rx_chans);
+	mal->bd_virt =
+	    dma_alloc_coherent(&ocpdev->dev, bd_size, &mal->bd_dma, GFP_KERNEL);
+
+	if (!mal->bd_virt) {
 		printk(KERN_ERR
-		       "mal%d: Out of memory allocating MAL descriptors !\n",
-		       ocpdev->def->index);
+		       "mal%d: out of memory allocating RX/TX descriptors!\n",
+		       mal->def->index);
 		err = -ENOMEM;
 		goto fail;
 	}
+	memset(mal->bd_virt, 0, bd_size);
 
-	/* God, oh, god, I hate DCRs */
-	set_mal_dcrn(mal, DCRN_MALTXCTP0R, mal->tx_phys_addr);
-#ifdef DCRN_MALTXCTP1R
-	if (maldata->num_tx_chans > 1)
-		set_mal_dcrn(mal, DCRN_MALTXCTP1R,
-			     mal->tx_phys_addr + MAL_DT_ALIGN);
-#endif				/* DCRN_MALTXCTP1R */
-#ifdef DCRN_MALTXCTP2R
-	if (maldata->num_tx_chans > 2)
-		set_mal_dcrn(mal, DCRN_MALTXCTP2R,
-			     mal->tx_phys_addr + 2 * MAL_DT_ALIGN);
-#endif				/* DCRN_MALTXCTP2R */
-#ifdef DCRN_MALTXCTP3R
-	if (maldata->num_tx_chans > 3)
-		set_mal_dcrn(mal, DCRN_MALTXCTP3R,
-			     mal->tx_phys_addr + 3 * MAL_DT_ALIGN);
-#endif				/* DCRN_MALTXCTP3R */
-#ifdef DCRN_MALTXCTP4R
-	if (maldata->num_tx_chans > 4)
-		set_mal_dcrn(mal, DCRN_MALTXCTP4R,
-			     mal->tx_phys_addr + 4 * MAL_DT_ALIGN);
-#endif				/* DCRN_MALTXCTP4R */
-#ifdef DCRN_MALTXCTP5R
-	if (maldata->num_tx_chans > 5)
-		set_mal_dcrn(mal, DCRN_MALTXCTP5R,
-			     mal->tx_phys_addr + 5 * MAL_DT_ALIGN);
-#endif				/* DCRN_MALTXCTP5R */
-#ifdef DCRN_MALTXCTP6R
-	if (maldata->num_tx_chans > 6)
-		set_mal_dcrn(mal, DCRN_MALTXCTP6R,
-			     mal->tx_phys_addr + 6 * MAL_DT_ALIGN);
-#endif				/* DCRN_MALTXCTP6R */
-#ifdef DCRN_MALTXCTP7R
-	if (maldata->num_tx_chans > 7)
-		set_mal_dcrn(mal, DCRN_MALTXCTP7R,
-			     mal->tx_phys_addr + 7 * MAL_DT_ALIGN);
-#endif				/* DCRN_MALTXCTP7R */
-
-	mal->rx_virt_addr = dma_alloc_coherent(&ocpdev->dev,
-					       MAL_DT_ALIGN *
-					       maldata->num_rx_chans,
-					       &mal->rx_phys_addr, GFP_KERNEL);
-
-	set_mal_dcrn(mal, DCRN_MALRXCTP0R, mal->rx_phys_addr);
-#ifdef DCRN_MALRXCTP1R
-	if (maldata->num_rx_chans > 1)
-		set_mal_dcrn(mal, DCRN_MALRXCTP1R,
-			     mal->rx_phys_addr + MAL_DT_ALIGN);
-#endif				/* DCRN_MALRXCTP1R */
-#ifdef DCRN_MALRXCTP2R
-	if (maldata->num_rx_chans > 2)
-		set_mal_dcrn(mal, DCRN_MALRXCTP2R,
-			     mal->rx_phys_addr + 2 * MAL_DT_ALIGN);
-#endif				/* DCRN_MALRXCTP2R */
-#ifdef DCRN_MALRXCTP3R
-	if (maldata->num_rx_chans > 3)
-		set_mal_dcrn(mal, DCRN_MALRXCTP3R,
-			     mal->rx_phys_addr + 3 * MAL_DT_ALIGN);
-#endif				/* DCRN_MALRXCTP3R */
+	for (i = 0; i < maldata->num_tx_chans; ++i)
+		set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
+			     sizeof(struct mal_descriptor) *
+			     mal_tx_bd_offset(mal, i));
+
+	for (i = 0; i < maldata->num_rx_chans; ++i)
+		set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma +
+			     sizeof(struct mal_descriptor) *
+			     mal_rx_bd_offset(mal, i));
 
 	err = request_irq(maldata->serr_irq, mal_serr, 0, "MAL SERR", mal);
 	if (err)
-		goto fail;
-	err = request_irq(maldata->txde_irq, mal_txde, 0, "MAL TX DE ", mal);
+		goto fail2;
+	err = request_irq(maldata->txde_irq, mal_txde, 0, "MAL TX DE", mal);
 	if (err)
-		goto fail;
+		goto fail3;
 	err = request_irq(maldata->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
 	if (err)
-		goto fail;
+		goto fail4;
 	err = request_irq(maldata->rxde_irq, mal_rxde, 0, "MAL RX DE", mal);
 	if (err)
-		goto fail;
+		goto fail5;
 	err = request_irq(maldata->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
 	if (err)
-		goto fail;
+		goto fail6;
 
-	set_mal_dcrn(mal, DCRN_MALIER,
-		     MALIER_DE | MALIER_NE | MALIER_TE |
-		     MALIER_OPBE | MALIER_PLBE);
+	/* Enable all MAL SERR interrupt sources */
+	set_mal_dcrn(mal, MAL_IER, MAL_IER_EVENTS);
 
-	/* Advertise me to the rest of the world */
+	/* Advertise this instance to the rest of the world */
 	ocp_set_drvdata(ocpdev, mal);
 
-	printk(KERN_INFO "mal%d: Initialized, %d tx channels, %d rx channels\n",
-	       ocpdev->def->index, maldata->num_tx_chans,
-	       maldata->num_rx_chans);
+	mal_dbg_register(mal->def->index, mal);
 
+	printk(KERN_INFO "mal%d: initialized, %d TX channels, %d RX channels\n",
+	       mal->def->index, maldata->num_tx_chans, maldata->num_rx_chans);
 	return 0;
 
+      fail6:
+	free_irq(maldata->rxde_irq, mal);
+      fail5:
+	free_irq(maldata->txeob_irq, mal);
+      fail4:
+	free_irq(maldata->txde_irq, mal);
+      fail3:
+	free_irq(maldata->serr_irq, mal);
+      fail2:
+	dma_free_coherent(&ocpdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
       fail:
-	/* FIXME: dispose requested IRQs ! */
-	if (err && mal)
-		kfree(mal);
+	kfree(mal);
 	return err;
 }
 
 static void __exit mal_remove(struct ocp_device *ocpdev)
 {
 	struct ibm_ocp_mal *mal = ocp_get_drvdata(ocpdev);
-	struct ocp_func_mal_data *maldata = ocpdev->def->additions;
+	struct ocp_func_mal_data *maldata = mal->def->additions;
+
+	MAL_DBG("%d: remove" NL, mal->def->index);
 
-	BUG_ON(!maldata);
+	/* Syncronize with scheduled polling, 
+	   stolen from net/core/dev.c:dev_close() 
+	 */
+	clear_bit(__LINK_STATE_START, &mal->poll_dev.state);
+	netif_poll_disable(&mal->poll_dev);
+
+	if (!list_empty(&mal->list)) {
+		/* This is *very* bad */
+		printk(KERN_EMERG
+		       "mal%d: commac list is not empty on remove!\n",
+		       mal->def->index);
+	}
 
 	ocp_set_drvdata(ocpdev, NULL);
 
-	/* FIXME: shut down the MAL, deal with dependency with emac */
 	free_irq(maldata->serr_irq, mal);
 	free_irq(maldata->txde_irq, mal);
 	free_irq(maldata->txeob_irq, mal);
 	free_irq(maldata->rxde_irq, mal);
 	free_irq(maldata->rxeob_irq, mal);
 
-	if (mal->tx_virt_addr)
-		dma_free_coherent(&ocpdev->dev,
-				  MAL_DT_ALIGN * maldata->num_tx_chans,
-				  mal->tx_virt_addr, mal->tx_phys_addr);
+	mal_reset(mal);
 
-	if (mal->rx_virt_addr)
-		dma_free_coherent(&ocpdev->dev,
-				  MAL_DT_ALIGN * maldata->num_rx_chans,
-				  mal->rx_virt_addr, mal->rx_phys_addr);
+	mal_dbg_register(mal->def->index, NULL);
+
+	dma_free_coherent(&ocpdev->dev,
+			  sizeof(struct mal_descriptor) *
+			  (NUM_TX_BUFF * maldata->num_tx_chans +
+			   NUM_RX_BUFF * maldata->num_rx_chans), mal->bd_virt,
+			  mal->bd_dma);
 
 	kfree(mal);
 }
 
 /* Structure for a device driver */
 static struct ocp_device_id mal_ids[] = {
-	{.vendor = OCP_ANY_ID,.function = OCP_FUNC_MAL},
-	{.vendor = OCP_VENDOR_INVALID}
+	{ .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_MAL },
+	{ .vendor = OCP_VENDOR_INVALID}
 };
 
 static struct ocp_driver mal_driver = {
@@ -441,23 +570,14 @@ static struct ocp_driver mal_driver = {
 	.remove = mal_remove,
 };
 
-static int __init init_mals(void)
+int __init mal_init(void)
 {
-	int rc;
-
-	rc = ocp_register_driver(&mal_driver);
-	if (rc < 0) {
-		ocp_unregister_driver(&mal_driver);
-		return -ENODEV;
-	}
-
-	return 0;
+	MAL_DBG(": init" NL);
+	return ocp_register_driver(&mal_driver);
 }
 
-static void __exit exit_mals(void)
+void __exit mal_exit(void)
 {
+	MAL_DBG(": exit" NL);
 	ocp_unregister_driver(&mal_driver);
 }
-
-module_init(init_mals);
-module_exit(exit_mals);
```
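For context, this rewrite replaces the old per-channel `txeob`/`rxeob`/`txde`/`rxde` callbacks with a NAPI-style polling interface driven by `mal_poll()`: a client attaches a `struct mal_commac` whose ops are invoked from the shared poll loop. The sketch below (not part of this commit) shows how a client driver might wire itself up. The ops member names follow the calls visible in this diff (`poll_tx`, `poll_rx`, `peek_rx`, `rxde`), and the helper functions are the ones added here; the `my_client` container, the empty callback bodies, the channel numbers, and the RX buffer size are hypothetical placeholders.

```c
/* Illustrative sketch only -- not part of this commit.  Everything
 * prefixed "my_" is a hypothetical placeholder; the MAL calls are the
 * ones introduced/kept by this diff.
 */
#include "ibm_emac_mal.h"

struct my_client {
	struct mal_commac commac;	/* must outlive the registration */
	struct ibm_ocp_mal *mal;
	int tx_chan, rx_chan;
};

static int my_poll_tx(void *dev) { return 0; }		/* reap completed TX BDs */
static int my_poll_rx(void *dev, int budget) { return 0; }	/* <= budget skbs */
static int my_peek_rx(void *dev) { return 0; }		/* non-zero if RX pending */
static void my_rxde(void *dev) { }			/* recover stopped RX channel */

static struct mal_commac_ops my_ops = {
	.poll_tx = my_poll_tx,
	.poll_rx = my_poll_rx,
	.peek_rx = my_peek_rx,
	.rxde    = my_rxde,
};

static int __init my_attach(struct my_client *p)
{
	int err;

	p->commac.ops = &my_ops;
	p->commac.dev = p;
	p->commac.tx_chan_mask = MAL_CHAN_MASK(p->tx_chan);
	p->commac.rx_chan_mask = MAL_CHAN_MASK(p->rx_chan);

	/* Claim the channels; fails with -EBUSY on a mask conflict */
	err = mal_register_commac(p->mal, &p->commac);
	if (err)
		return err;

	/* RX buffer size is now programmed in 16-byte units:
	 * mal_set_rcbs() writes "size >> 4" into MAL_RCBS(channel).
	 */
	err = mal_set_rcbs(p->mal, p->rx_chan, 0x600);
	if (err) {
		mal_unregister_commac(p->mal, &p->commac);
		return err;
	}

	/* Join the shared poll loop, then open the channels */
	mal_poll_add(p->mal, &p->commac);
	mal_enable_tx_channel(p->mal, p->tx_chan);
	mal_enable_rx_channel(p->mal, p->rx_chan);
	return 0;
}
```

Once attached this way, `mal_poll()` calls `poll_tx` and `poll_rx` for every commac on the poll list, and its "rotting packet" pass re-arms polling whenever `peek_rx` reports work that arrived after the EOB interrupts were re-enabled, or after an RXDE interrupt has set `rx_stopped`.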
