Diffstat (limited to 'drivers/net/lib8390.c')
| -rw-r--r-- | drivers/net/lib8390.c | 100 | 
1 files changed, 51 insertions, 49 deletions
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index 0c5447dac03..00d59ab2f8a 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -150,19 +150,19 @@ static void __NS8390_init(struct net_device *dev, int startp);
  *	card means that approach caused horrible problems like losing serial data
  *	at 38400 baud on some chips. Remember many 8390 nics on PCI were ISA
  *	chips with FPGA front ends.
- *	
+ *
  *	Ok the logic behind the 8390 is very simple:
- *	
+ *
  *	Things to know
  *		- IRQ delivery is asynchronous to the PCI bus
  *		- Blocking the local CPU IRQ via spin locks was too slow
  *		- The chip has register windows needing locking work
- *	
+ *
  *	So the path was once (I say once as people appear to have changed it
  *	in the mean time and it now looks rather bogus if the changes to use
  *	disable_irq_nosync_irqsave are disabling the local IRQ)
- *	
- *	
+ *
+ *
  *		Take the page lock
  *		Mask the IRQ on chip
  *		Disable the IRQ (but not mask locally- someone seems to have
@@ -170,22 +170,22 @@ static void __NS8390_init(struct net_device *dev, int startp);
  *			[This must be _nosync as the page lock may otherwise
  *				deadlock us]
  *		Drop the page lock and turn IRQs back on
- *		
+ *
  *		At this point an existing IRQ may still be running but we can't
  *		get a new one
- *	
+ *
  *		Take the lock (so we know the IRQ has terminated) but don't mask
  *	the IRQs on the processor
  *		Set irqlock [for debug]
- *	
+ *
  *		Transmit (slow as ****)
- *	
+ *
  *		re-enable the IRQ
- *	
- *	
+ *
+ *
  *	We have to use disable_irq because otherwise you will get delayed
  *	interrupts on the APIC bus deadlocking the transmit path.
- *	
+ *
  *	Quite hairy but the chip simply wasn't designed for SMP and you can't
  *	even ACK an interrupt without risking corrupting other parallel
  *	activities on the chip." [lkml, 25 Jul 2007]
@@ -265,7 +265,7 @@ static void ei_tx_timeout(struct net_device *dev)
 	int txsr, isr, tickssofar = jiffies - dev->trans_start;
 	unsigned long flags;
 
-	ei_local->stat.tx_errors++;
+	dev->stats.tx_errors++;
 
 	spin_lock_irqsave(&ei_local->page_lock, flags);
 	txsr = ei_inb(e8390_base+EN0_TSR);
@@ -276,7 +276,7 @@ static void ei_tx_timeout(struct net_device *dev)
 		dev->name, (txsr & ENTSR_ABT) ? "excess collisions." :
 		(isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar);
 
-	if (!isr && !ei_local->stat.tx_packets)
+	if (!isr && !dev->stats.tx_packets)
 	{
 		/* The 8390 probably hasn't gotten on the cable yet. */
 		ei_local->interface_num ^= 1;   /* Try a different xcvr.  */
@@ -374,7 +374,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
 		spin_unlock(&ei_local->page_lock);
 		enable_irq_lockdep_irqrestore(dev->irq, &flags);
-		ei_local->stat.tx_errors++;
+		dev->stats.tx_errors++;
 		return 1;
 	}
 
@@ -417,7 +417,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	enable_irq_lockdep_irqrestore(dev->irq, &flags);
 
 	dev_kfree_skb (skb);
-	ei_local->stat.tx_bytes += send_length;
+	dev->stats.tx_bytes += send_length;
 
 	return 0;
 }
@@ -493,9 +493,9 @@ static irqreturn_t __ei_interrupt(int irq, void *dev_id)
 
 		if (interrupts & ENISR_COUNTERS)
 		{
-			ei_local->stat.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0);
-			ei_local->stat.rx_crc_errors   += ei_inb_p(e8390_base + EN0_COUNTER1);
-			ei_local->stat.rx_missed_errors+= ei_inb_p(e8390_base + EN0_COUNTER2);
+			dev->stats.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0);
+			dev->stats.rx_crc_errors   += ei_inb_p(e8390_base + EN0_COUNTER1);
+			dev->stats.rx_missed_errors+= ei_inb_p(e8390_base + EN0_COUNTER2);
 			ei_outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */
 		}
 
@@ -553,7 +553,8 @@ static void __ei_poll(struct net_device *dev)
 static void ei_tx_err(struct net_device *dev)
 {
 	unsigned long e8390_base = dev->base_addr;
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	/* ei_local is used on some platforms via the EI_SHIFT macro */
+	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
 	unsigned char txsr = ei_inb_p(e8390_base+EN0_TSR);
 	unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
 
@@ -578,10 +579,10 @@ static void ei_tx_err(struct net_device *dev)
 		ei_tx_intr(dev);
 	else
 	{
-		ei_local->stat.tx_errors++;
-		if (txsr & ENTSR_CRS) ei_local->stat.tx_carrier_errors++;
-		if (txsr & ENTSR_CDH) ei_local->stat.tx_heartbeat_errors++;
-		if (txsr & ENTSR_OWC) ei_local->stat.tx_window_errors++;
+		dev->stats.tx_errors++;
+		if (txsr & ENTSR_CRS) dev->stats.tx_carrier_errors++;
+		if (txsr & ENTSR_CDH) dev->stats.tx_heartbeat_errors++;
+		if (txsr & ENTSR_OWC) dev->stats.tx_window_errors++;
 	}
 }
 
@@ -645,25 +646,25 @@ static void ei_tx_intr(struct net_device *dev)
 
 	/* Minimize Tx latency: update the statistics after we restart TXing. */
 	if (status & ENTSR_COL)
-		ei_local->stat.collisions++;
+		dev->stats.collisions++;
 	if (status & ENTSR_PTX)
-		ei_local->stat.tx_packets++;
+		dev->stats.tx_packets++;
 	else
 	{
-		ei_local->stat.tx_errors++;
+		dev->stats.tx_errors++;
 		if (status & ENTSR_ABT)
 		{
-			ei_local->stat.tx_aborted_errors++;
-			ei_local->stat.collisions += 16;
+			dev->stats.tx_aborted_errors++;
+			dev->stats.collisions += 16;
 		}
 		if (status & ENTSR_CRS)
-			ei_local->stat.tx_carrier_errors++;
+			dev->stats.tx_carrier_errors++;
 		if (status & ENTSR_FU)
-			ei_local->stat.tx_fifo_errors++;
+			dev->stats.tx_fifo_errors++;
 		if (status & ENTSR_CDH)
-			ei_local->stat.tx_heartbeat_errors++;
+			dev->stats.tx_heartbeat_errors++;
 		if (status & ENTSR_OWC)
-			ei_local->stat.tx_window_errors++;
+			dev->stats.tx_window_errors++;
 	}
 	netif_wake_queue(dev);
 }
@@ -730,7 +731,7 @@ static void ei_receive(struct net_device *dev)
 			&& rx_frame.next != next_frame + 1 - num_rx_pages) {
 			ei_local->current_page = rxing_page;
 			ei_outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
-			ei_local->stat.rx_errors++;
+			dev->stats.rx_errors++;
 			continue;
 		}
 
@@ -740,8 +741,8 @@ static void ei_receive(struct net_device *dev)
 				printk(KERN_DEBUG "%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n",
 					   dev->name, rx_frame.count, rx_frame.status,
 					   rx_frame.next);
-			ei_local->stat.rx_errors++;
-			ei_local->stat.rx_length_errors++;
+			dev->stats.rx_errors++;
+			dev->stats.rx_length_errors++;
 		}
 		 else if ((pkt_stat & 0x0F) == ENRSR_RXOK)
 		{
@@ -753,7 +754,7 @@ static void ei_receive(struct net_device *dev)
 				if (ei_debug > 1)
 					printk(KERN_DEBUG "%s: Couldn't allocate a sk_buff of size %d.\n",
 						   dev->name, pkt_len);
-				ei_local->stat.rx_dropped++;
+				dev->stats.rx_dropped++;
 				break;
 			}
 			else
@@ -764,10 +765,10 @@ static void ei_receive(struct net_device *dev)
 				skb->protocol=eth_type_trans(skb,dev);
 				netif_rx(skb);
 				dev->last_rx = jiffies;
-				ei_local->stat.rx_packets++;
-				ei_local->stat.rx_bytes += pkt_len;
+				dev->stats.rx_packets++;
+				dev->stats.rx_bytes += pkt_len;
 				if (pkt_stat & ENRSR_PHY)
-					ei_local->stat.multicast++;
+					dev->stats.multicast++;
 			}
 		}
 		else
@@ -776,10 +777,10 @@ static void ei_receive(struct net_device *dev)
 				printk(KERN_DEBUG "%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n",
 					   dev->name, rx_frame.status, rx_frame.next,
 					   rx_frame.count);
-			ei_local->stat.rx_errors++;
+			dev->stats.rx_errors++;
 			/* NB: The NIC counts CRC, frame and missed errors. */
 			if (pkt_stat & ENRSR_FO)
-				ei_local->stat.rx_fifo_errors++;
+				dev->stats.rx_fifo_errors++;
 		}
 		next_frame = rx_frame.next;
 
@@ -816,7 +817,8 @@ static void ei_rx_overrun(struct net_device *dev)
 {
 	unsigned long e8390_base = dev->base_addr;
 	unsigned char was_txing, must_resend = 0;
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	/* ei_local is used on some platforms via the EI_SHIFT macro */
+	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
 
 	/*
 	 * Record whether a Tx was in progress and then issue the
@@ -827,7 +829,7 @@ static void ei_rx_overrun(struct net_device *dev)
 
 	if (ei_debug > 1)
 		printk(KERN_DEBUG "%s: Receiver overrun.\n", dev->name);
-	ei_local->stat.rx_over_errors++;
+	dev->stats.rx_over_errors++;
 
 	/*
 	 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
@@ -889,16 +891,16 @@ static struct net_device_stats *get_stats(struct net_device *dev)
 
 	/* If the card is stopped, just return the present stats. */
 	if (!netif_running(dev))
-		return &ei_local->stat;
+		return &dev->stats;
 
 	spin_lock_irqsave(&ei_local->page_lock,flags);
 	/* Read the counter registers, assuming we are in page 0. */
-	ei_local->stat.rx_frame_errors += ei_inb_p(ioaddr + EN0_COUNTER0);
-	ei_local->stat.rx_crc_errors   += ei_inb_p(ioaddr + EN0_COUNTER1);
-	ei_local->stat.rx_missed_errors+= ei_inb_p(ioaddr + EN0_COUNTER2);
+	dev->stats.rx_frame_errors += ei_inb_p(ioaddr + EN0_COUNTER0);
+	dev->stats.rx_crc_errors   += ei_inb_p(ioaddr + EN0_COUNTER1);
+	dev->stats.rx_missed_errors+= ei_inb_p(ioaddr + EN0_COUNTER2);
 	spin_unlock_irqrestore(&ei_local->page_lock, flags);
 
-	return &ei_local->stat;
+	return &dev->stats;
 }
 
 /*
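For context, the conversion this patch applies is the standard move from a driver-private copy of the counters to the stats member embedded in struct net_device. The sketch below is illustrative only and is not taken from lib8390.c; the example_* names are hypothetical and merely show the before/after shape of such a change.

	/*
	 * Illustrative sketch only -- not from lib8390.c. The "example_*" names
	 * are made up; they show the shape of the ei_local->stat -> dev->stats
	 * conversion performed by this patch.
	 */
	#include <linux/netdevice.h>

	/* Before: the driver keeps its own copy of the counters in its private area. */
	struct example_priv_old {
		struct net_device_stats stat;	/* private duplicate of the counters */
		spinlock_t page_lock;
	};

	static struct net_device_stats *example_get_stats_old(struct net_device *dev)
	{
		struct example_priv_old *priv = netdev_priv(dev);

		return &priv->stat;		/* hand back the private copy */
	}

	/* After: counters are updated directly in dev->stats, so the private
	 * copy (and, eventually, the custom get_stats path) can be dropped. */
	static void example_tx_done(struct net_device *dev, bool collided)
	{
		dev->stats.tx_packets++;	/* core-provided counters */
		if (collided)
			dev->stats.collisions++;
	}

The other part of the patch keeps ei_local in ei_tx_err() and ei_rx_overrun() but marks it __maybe_unused: as the added comment notes, some platforms only reference ei_local through the EI_SHIFT() macro, so on configurations where that macro does not expand to an ei_local access the variable would otherwise trigger an unused-variable warning once the stat updates are gone.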
