Diffstat (limited to 'drivers/net')
34 files changed, 1426 insertions, 136 deletions
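A cleanup that recurs in several hunks below (chelsio/sge.c, loopback.c, veth.c) replaces the per_cpu_ptr()/smp_processor_id() pair with this_cpu_ptr(). A minimal sketch of the two forms, assuming a hypothetical per-cpu stats pointer pcpu_stats and struct demo_stats (neither is from the patch); the accessors are interchangeable only while preemption is disabled, as it is for these callers (BHs off, or on the xmit path):

	struct demo_stats *st;

	/* Old form: look up the current CPU explicitly. */
	st = per_cpu_ptr(pcpu_stats, smp_processor_id());

	/* New form: the accessor resolves the current CPU itself, which
	 * is shorter and cheaper on architectures that keep a dedicated
	 * per-cpu base register. */
	st = this_cpu_ptr(pcpu_stats);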
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index 78b7167a8ce..39db0e96815 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c @@ -837,7 +837,7 @@ static int vortex_resume(struct device *dev) return 0; } -static struct dev_pm_ops vortex_pm_ops = { +static const struct dev_pm_ops vortex_pm_ops = { .suspend = vortex_suspend, .resume = vortex_resume, .freeze = vortex_suspend, diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 6c521b4c319..dd9a09c72df 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1953,6 +1953,8 @@ config BCM63XX_ENET source "drivers/net/fs_enet/Kconfig" +source "drivers/net/octeon/Kconfig" + endif # NET_ETHERNET # diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 246323d7f16..ad1346dd9da 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -285,3 +285,5 @@ obj-$(CONFIG_VIRTIO_NET) += virtio_net.o obj-$(CONFIG_SFC) += sfc/ obj-$(CONFIG_WIMAX) += wimax/ + +obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/ diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig index c37ee9e6b67..39e1c0d3947 100644 --- a/drivers/net/arm/Kconfig +++ b/drivers/net/arm/Kconfig @@ -68,6 +68,7 @@ config W90P910_ETH tristate "Nuvoton w90p910 Ethernet support" depends on ARM && ARCH_W90X900 select PHYLIB + select MII help Say Y here if you want to use built-in Ethernet ports on w90p910 processor. diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c index 691b81eb0f4..c3dfbdd2cdc 100644 --- a/drivers/net/arm/ixp4xx_eth.c +++ b/drivers/net/arm/ixp4xx_eth.c @@ -322,7 +322,7 @@ static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location, ret = ixp4xx_mdio_cmd(bus, phy_id, location, 1, val); spin_unlock_irqrestore(&mdio_lock, flags); #if DEBUG_MDIO - printk(KERN_DEBUG "%s #%i: MII read [%i] <- 0x%X, err = %i\n", + printk(KERN_DEBUG "%s #%i: MII write [%i] <- 0x%X, err = %i\n", bus->name, phy_id, location, val, ret); #endif return ret; diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c index 8c658cf6f62..109d2783e4d 100644 --- a/drivers/net/chelsio/sge.c +++ b/drivers/net/chelsio/sge.c @@ -1378,7 +1378,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) } __skb_pull(skb, sizeof(*p)); - st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id()); + st = this_cpu_ptr(sge->port_stats[p->iff]); skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev); if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff && @@ -1780,8 +1780,7 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct adapter *adapter = dev->ml_priv; struct sge *sge = adapter->sge; - struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port], - smp_processor_id()); + struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]); struct cpl_tx_pkt *cpl; struct sk_buff *orig_skb = skb; int ret; diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index 0cbe3c0e7c0..b3773006568 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c @@ -1646,7 +1646,7 @@ dm9000_drv_resume(struct device *dev) return 0; } -static struct dev_pm_ops dm9000_drv_pm_ops = { +static const struct dev_pm_ops dm9000_drv_pm_ops = { .suspend = dm9000_drv_suspend, .resume = dm9000_drv_resume, }; diff --git a/drivers/net/ehea/ehea_hcall.h b/drivers/net/ehea/ehea_hcall.h deleted file mode 100644 index 8e7d1c3edc6..00000000000 --- a/drivers/net/ehea/ehea_hcall.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * linux/drivers/net/ehea/ehea_hcall.h - * - * eHEA ethernet device driver 
for IBM eServer System p - * - * (C) Copyright IBM Corp. 2006 - * - * Authors: - * Christoph Raisch <raisch@de.ibm.com> - * Jan-Bernd Themann <themann@de.ibm.com> - * Thomas Klein <tklein@de.ibm.com> - * - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -#ifndef __EHEA_HCALL_H__ -#define __EHEA_HCALL_H__ - -/** - * This file contains HCALL defines that are to be included in the appropriate - * kernel files later - */ - -#define H_ALLOC_HEA_RESOURCE 0x278 -#define H_MODIFY_HEA_QP 0x250 -#define H_QUERY_HEA_QP 0x254 -#define H_QUERY_HEA 0x258 -#define H_QUERY_HEA_PORT 0x25C -#define H_MODIFY_HEA_PORT 0x260 -#define H_REG_BCMC 0x264 -#define H_DEREG_BCMC 0x268 -#define H_REGISTER_HEA_RPAGES 0x26C -#define H_DISABLE_AND_GET_HEA 0x270 -#define H_GET_HEA_INFO 0x274 -#define H_ADD_CONN 0x284 -#define H_DEL_CONN 0x288 - -#endif /* __EHEA_HCALL_H__ */ diff --git a/drivers/net/ehea/ehea_phyp.h b/drivers/net/ehea/ehea_phyp.h index f3628c80356..2f8174c248b 100644 --- a/drivers/net/ehea/ehea_phyp.h +++ b/drivers/net/ehea/ehea_phyp.h @@ -33,7 +33,6 @@ #include <asm/hvcall.h> #include "ehea.h" #include "ehea_hw.h" -#include "ehea_hcall.h" /* Some abbreviations used here: * diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index eae4ad749e9..b9fcc981983 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -81,7 +81,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb, /* it's OK to use per_cpu_ptr() because BHs are off */ pcpu_lstats = dev->ml_priv; - lb_stats = per_cpu_ptr(pcpu_lstats, smp_processor_id()); + lb_stats = this_cpu_ptr(pcpu_lstats); len = skb->len; if (likely(netif_rx(skb) == NET_RX_SUCCESS)) { diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c index ad95d5f7b63..8c8515619b8 100644 --- a/drivers/net/mlx4/alloc.c +++ b/drivers/net/mlx4/alloc.c @@ -72,35 +72,6 @@ void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj) mlx4_bitmap_free_range(bitmap, obj, 1); } -static unsigned long find_aligned_range(unsigned long *bitmap, - u32 start, u32 nbits, - int len, int align) -{ - unsigned long end, i; - -again: - start = ALIGN(start, align); - - while ((start < nbits) && test_bit(start, bitmap)) - start += align; - - if (start >= nbits) - return -1; - - end = start+len; - if (end > nbits) - return -1; - - for (i = start + 1; i < end; i++) { - if (test_bit(i, bitmap)) { - start = i + 1; - goto again; - } - } - - return start; -} - u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align) { u32 obj, i; @@ -110,13 +81,13 @@ u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align) spin_lock(&bitmap->lock); - obj = find_aligned_range(bitmap->table, bitmap->last, - bitmap->max, cnt, align); + obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, + bitmap->last, cnt, align - 1); if (obj >= bitmap->max) { bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) & 
bitmap->mask; - obj = find_aligned_range(bitmap->table, 0, bitmap->max, - cnt, align); + obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, + 0, cnt, align - 1); } if (obj < bitmap->max) { diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c index 3c16602172f..04f42ae1eda 100644 --- a/drivers/net/mlx4/fw.c +++ b/drivers/net/mlx4/fw.c @@ -90,6 +90,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags) [ 9] = "Q_Key violation counter", [10] = "VMM", [12] = "DPDP", + [15] = "Big LSO headers", [16] = "MW support", [17] = "APM support", [18] = "Atomic ops support", @@ -235,7 +236,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET); dev_cap->max_mpts = 1 << (field & 0x3f); MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET); - dev_cap->reserved_eqs = 1 << (field & 0xf); + dev_cap->reserved_eqs = field & 0xf; MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET); dev_cap->max_eqs = 1 << (field & 0xf); MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET); diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index 291a505fd4f..3cf56d90d85 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c @@ -1174,7 +1174,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) return 0; err_port: - for (port = 1; port <= dev->caps.num_ports; port++) + for (--port; port >= 1; --port) mlx4_cleanup_port_info(&priv->port[port]); mlx4_cleanup_mcg_table(dev); diff --git a/drivers/net/octeon/Kconfig b/drivers/net/octeon/Kconfig new file mode 100644 index 00000000000..1e56bbf3f5c --- /dev/null +++ b/drivers/net/octeon/Kconfig @@ -0,0 +1,10 @@ +config OCTEON_MGMT_ETHERNET + tristate "Octeon Management port ethernet driver (CN5XXX, CN6XXX)" + depends on CPU_CAVIUM_OCTEON + select PHYLIB + select MDIO_OCTEON + default y + help + This option enables the ethernet driver for the management + port on Cavium Networks' Octeon CN57XX, CN56XX, CN55XX, + CN54XX, CN52XX, and CN6XXX chips. diff --git a/drivers/net/octeon/Makefile b/drivers/net/octeon/Makefile new file mode 100644 index 00000000000..906edecacfd --- /dev/null +++ b/drivers/net/octeon/Makefile @@ -0,0 +1,2 @@ + +obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon_mgmt.o diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c new file mode 100644 index 00000000000..050538bf155 --- /dev/null +++ b/drivers/net/octeon/octeon_mgmt.c @@ -0,0 +1,1176 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2009 Cavium Networks + */ + +#include <linux/capability.h> +#include <linux/dma-mapping.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/phy.h> +#include <linux/spinlock.h> + +#include <asm/octeon/octeon.h> +#include <asm/octeon/cvmx-mixx-defs.h> +#include <asm/octeon/cvmx-agl-defs.h> + +#define DRV_NAME "octeon_mgmt" +#define DRV_VERSION "2.0" +#define DRV_DESCRIPTION \ + "Cavium Networks Octeon MII (management) port Network Driver" + +#define OCTEON_MGMT_NAPI_WEIGHT 16 + +/* + * Ring sizes that are powers of two allow for more efficient modulo + * opertions. + */ +#define OCTEON_MGMT_RX_RING_SIZE 512 +#define OCTEON_MGMT_TX_RING_SIZE 128 + +/* Allow 8 bytes for vlan and FCS. 
*/ +#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN) + +union mgmt_port_ring_entry { + u64 d64; + struct { + u64 reserved_62_63:2; + /* Length of the buffer/packet in bytes */ + u64 len:14; + /* For TX, signals that the packet should be timestamped */ + u64 tstamp:1; + /* The RX error code */ + u64 code:7; +#define RING_ENTRY_CODE_DONE 0xf +#define RING_ENTRY_CODE_MORE 0x10 + /* Physical address of the buffer */ + u64 addr:40; + } s; +}; + +struct octeon_mgmt { + struct net_device *netdev; + int port; + int irq; + u64 *tx_ring; + dma_addr_t tx_ring_handle; + unsigned int tx_next; + unsigned int tx_next_clean; + unsigned int tx_current_fill; + /* The tx_list lock also protects the ring related variables */ + struct sk_buff_head tx_list; + + /* RX variables only touched in napi_poll. No locking necessary. */ + u64 *rx_ring; + dma_addr_t rx_ring_handle; + unsigned int rx_next; + unsigned int rx_next_fill; + unsigned int rx_current_fill; + struct sk_buff_head rx_list; + + spinlock_t lock; + unsigned int last_duplex; + unsigned int last_link; + struct device *dev; + struct napi_struct napi; + struct tasklet_struct tx_clean_tasklet; + struct phy_device *phydev; +}; + +static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable) +{ + int port = p->port; + union cvmx_mixx_intena mix_intena; + unsigned long flags; + + spin_lock_irqsave(&p->lock, flags); + mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port)); + mix_intena.s.ithena = enable ? 1 : 0; + cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64); + spin_unlock_irqrestore(&p->lock, flags); +} + +static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable) +{ + int port = p->port; + union cvmx_mixx_intena mix_intena; + unsigned long flags; + + spin_lock_irqsave(&p->lock, flags); + mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port)); + mix_intena.s.othena = enable ? 1 : 0; + cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64); + spin_unlock_irqrestore(&p->lock, flags); +} + +static inline void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p) +{ + octeon_mgmt_set_rx_irq(p, 1); +} + +static inline void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p) +{ + octeon_mgmt_set_rx_irq(p, 0); +} + +static inline void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p) +{ + octeon_mgmt_set_tx_irq(p, 1); +} + +static inline void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p) +{ + octeon_mgmt_set_tx_irq(p, 0); +} + +static unsigned int ring_max_fill(unsigned int ring_size) +{ + return ring_size - 8; +} + +static unsigned int ring_size_to_bytes(unsigned int ring_size) +{ + return ring_size * sizeof(union mgmt_port_ring_entry); +} + +static void octeon_mgmt_rx_fill_ring(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + int port = p->port; + + while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) { + unsigned int size; + union mgmt_port_ring_entry re; + struct sk_buff *skb; + + /* CN56XX pass 1 needs 8 bytes of padding. */ + size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN; + + skb = netdev_alloc_skb(netdev, size); + if (!skb) + break; + skb_reserve(skb, NET_IP_ALIGN); + __skb_queue_tail(&p->rx_list, skb); + + re.d64 = 0; + re.s.len = size; + re.s.addr = dma_map_single(p->dev, skb->data, + size, + DMA_FROM_DEVICE); + + /* Put it in the ring. 
*/ + p->rx_ring[p->rx_next_fill] = re.d64; + dma_sync_single_for_device(p->dev, p->rx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), + DMA_BIDIRECTIONAL); + p->rx_next_fill = + (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE; + p->rx_current_fill++; + /* Ring the bell. */ + cvmx_write_csr(CVMX_MIXX_IRING2(port), 1); + } +} + +static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p) +{ + int port = p->port; + union cvmx_mixx_orcnt mix_orcnt; + union mgmt_port_ring_entry re; + struct sk_buff *skb; + int cleaned = 0; + unsigned long flags; + + mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port)); + while (mix_orcnt.s.orcnt) { + dma_sync_single_for_cpu(p->dev, p->tx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), + DMA_BIDIRECTIONAL); + + spin_lock_irqsave(&p->tx_list.lock, flags); + + re.d64 = p->tx_ring[p->tx_next_clean]; + p->tx_next_clean = + (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE; + skb = __skb_dequeue(&p->tx_list); + + mix_orcnt.u64 = 0; + mix_orcnt.s.orcnt = 1; + + /* Acknowledge to hardware that we have the buffer. */ + cvmx_write_csr(CVMX_MIXX_ORCNT(port), mix_orcnt.u64); + p->tx_current_fill--; + + spin_unlock_irqrestore(&p->tx_list.lock, flags); + + dma_unmap_single(p->dev, re.s.addr, re.s.len, + DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + cleaned++; + + mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port)); + } + + if (cleaned && netif_queue_stopped(p->netdev)) + netif_wake_queue(p->netdev); +} + +static void octeon_mgmt_clean_tx_tasklet(unsigned long arg) +{ + struct octeon_mgmt *p = (struct octeon_mgmt *)arg; + octeon_mgmt_clean_tx_buffers(p); + octeon_mgmt_enable_tx_irq(p); +} + +static void octeon_mgmt_update_rx_stats(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + int port = p->port; + unsigned long flags; + u64 drop, bad; + + /* These reads also clear the count registers. */ + drop = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port)); + bad = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port)); + + if (drop || bad) { + /* Do an atomic update. */ + spin_lock_irqsave(&p->lock, flags); + netdev->stats.rx_errors += bad; + netdev->stats.rx_dropped += drop; + spin_unlock_irqrestore(&p->lock, flags); + } +} + +static void octeon_mgmt_update_tx_stats(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + int port = p->port; + unsigned long flags; + + union cvmx_agl_gmx_txx_stat0 s0; + union cvmx_agl_gmx_txx_stat1 s1; + + /* These reads also clear the count registers. */ + s0.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT0(port)); + s1.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT1(port)); + + if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) { + /* Do an atomic update. */ + spin_lock_irqsave(&p->lock, flags); + netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol; + netdev->stats.collisions += s1.s.scol + s1.s.mcol; + spin_unlock_irqrestore(&p->lock, flags); + } +} + +/* + * Dequeue a receive skb and its corresponding ring entry. The ring + * entry is returned, *pskb is updated to point to the skb. 
+ */ +static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p, + struct sk_buff **pskb) +{ + union mgmt_port_ring_entry re; + + dma_sync_single_for_cpu(p->dev, p->rx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), + DMA_BIDIRECTIONAL); + + re.d64 = p->rx_ring[p->rx_next]; + p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE; + p->rx_current_fill--; + *pskb = __skb_dequeue(&p->rx_list); + + dma_unmap_single(p->dev, re.s.addr, + ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM, + DMA_FROM_DEVICE); + + return re.d64; +} + + +static int octeon_mgmt_receive_one(struct octeon_mgmt *p) +{ + int port = p->port; + struct net_device *netdev = p->netdev; + union cvmx_mixx_ircnt mix_ircnt; + union mgmt_port_ring_entry re; + struct sk_buff *skb; + struct sk_buff *skb2; + struct sk_buff *skb_new; + union mgmt_port_ring_entry re2; + int rc = 1; + + + re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb); + if (likely(re.s.code == RING_ENTRY_CODE_DONE)) { + /* A good packet, send it up. */ + skb_put(skb, re.s.len); +good: + skb->protocol = eth_type_trans(skb, netdev); + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += skb->len; + netdev->last_rx = jiffies; + netif_receive_skb(skb); + rc = 0; + } else if (re.s.code == RING_ENTRY_CODE_MORE) { + /* + * Packet split across skbs. This can happen if we + * increase the MTU. Buffers that are already in the + * rx ring can then end up being too small. As the rx + * ring is refilled, buffers sized for the new MTU + * will be used and we should go back to the normal + * non-split case. + */ + skb_put(skb, re.s.len); + do { + re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2); + if (re2.s.code != RING_ENTRY_CODE_MORE + && re2.s.code != RING_ENTRY_CODE_DONE) + goto split_error; + skb_put(skb2, re2.s.len); + skb_new = skb_copy_expand(skb, 0, skb2->len, + GFP_ATOMIC); + if (!skb_new) + goto split_error; + if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new), + skb2->len)) + goto split_error; + skb_put(skb_new, skb2->len); + dev_kfree_skb_any(skb); + dev_kfree_skb_any(skb2); + skb = skb_new; + } while (re2.s.code == RING_ENTRY_CODE_MORE); + goto good; + } else { + /* Some other error, discard it. */ + dev_kfree_skb_any(skb); + /* + * Error statistics are accumulated in + * octeon_mgmt_update_rx_stats. + */ + } + goto done; +split_error: + /* Discard the whole mess. */ + dev_kfree_skb_any(skb); + dev_kfree_skb_any(skb2); + while (re2.s.code == RING_ENTRY_CODE_MORE) { + re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2); + dev_kfree_skb_any(skb2); + } + netdev->stats.rx_errors++; + +done: + /* Tell the hardware we processed a packet. */ + mix_ircnt.u64 = 0; + mix_ircnt.s.ircnt = 1; + cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64); + return rc; + +} + +static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget) +{ + int port = p->port; + unsigned int work_done = 0; + union cvmx_mixx_ircnt mix_ircnt; + int rc; + + + mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port)); + while (work_done < budget && mix_ircnt.s.ircnt) { + + rc = octeon_mgmt_receive_one(p); + if (!rc) + work_done++; + + /* Check for more packets. 
*/ + mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port)); + } + + octeon_mgmt_rx_fill_ring(p->netdev); + + return work_done; +} + +static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget) +{ + struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi); + struct net_device *netdev = p->netdev; + unsigned int work_done = 0; + + work_done = octeon_mgmt_receive_packets(p, budget); + + if (work_done < budget) { + /* We stopped because no more packets were available. */ + napi_complete(napi); + octeon_mgmt_enable_rx_irq(p); + } + octeon_mgmt_update_rx_stats(netdev); + + return work_done; +} + +/* Reset the hardware to clean state. */ +static void octeon_mgmt_reset_hw(struct octeon_mgmt *p) +{ + union cvmx_mixx_ctl mix_ctl; + union cvmx_mixx_bist mix_bist; + union cvmx_agl_gmx_bist agl_gmx_bist; + + mix_ctl.u64 = 0; + cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64); + do { + mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(p->port)); + } while (mix_ctl.s.busy); + mix_ctl.s.reset = 1; + cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64); + cvmx_read_csr(CVMX_MIXX_CTL(p->port)); + cvmx_wait(64); + + mix_bist.u64 = cvmx_read_csr(CVMX_MIXX_BIST(p->port)); + if (mix_bist.u64) + dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n", + (unsigned long long)mix_bist.u64); + + agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST); + if (agl_gmx_bist.u64) + dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n", + (unsigned long long)agl_gmx_bist.u64); +} + +struct octeon_mgmt_cam_state { + u64 cam[6]; + u64 cam_mask; + int cam_index; +}; + +static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs, + unsigned char *addr) +{ + int i; + + for (i = 0; i < 6; i++) + cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index)); + cs->cam_mask |= (1ULL << cs->cam_index); + cs->cam_index++; +} + +static void octeon_mgmt_set_rx_filtering(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + int port = p->port; + int i; + union cvmx_agl_gmx_rxx_adr_ctl adr_ctl; + union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx; + unsigned long flags; + unsigned int prev_packet_enable; + unsigned int cam_mode = 1; /* 1 - Accept on CAM match */ + unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */ + struct octeon_mgmt_cam_state cam_state; + struct dev_addr_list *list; + struct list_head *pos; + int available_cam_entries; + + memset(&cam_state, 0, sizeof(cam_state)); + + if ((netdev->flags & IFF_PROMISC) || netdev->dev_addrs.count > 7) { + cam_mode = 0; + available_cam_entries = 8; + } else { + /* + * One CAM entry for the primary address, leaves seven + * for the secondary addresses. + */ + available_cam_entries = 7 - netdev->dev_addrs.count; + } + + if (netdev->flags & IFF_MULTICAST) { + if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) + || netdev->mc_count > available_cam_entries) + multicast_mode = 2; /* 1 - Accept all multicast. */ + else + multicast_mode = 0; /* 0 - Use CAM. */ + } + + if (cam_mode == 1) { + /* Add primary address. */ + octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr); + list_for_each(pos, &netdev->dev_addrs.list) { + struct netdev_hw_addr *hw_addr; + hw_addr = list_entry(pos, struct netdev_hw_addr, list); + octeon_mgmt_cam_state_add(&cam_state, hw_addr->addr); + list = list->next; + } + } + if (multicast_mode == 0) { + i = netdev->mc_count; + list = netdev->mc_list; + while (i--) { + octeon_mgmt_cam_state_add(&cam_state, list->da_addr); + list = list->next; + } + } + + + spin_lock_irqsave(&p->lock, flags); + + /* Disable packet I/O. 
*/ + agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port)); + prev_packet_enable = agl_gmx_prtx.s.en; + agl_gmx_prtx.s.en = 0; + cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64); + + + adr_ctl.u64 = 0; + adr_ctl.s.cam_mode = cam_mode; + adr_ctl.s.mcst = multicast_mode; + adr_ctl.s.bcst = 1; /* Allow broadcast */ + + cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), adr_ctl.u64); + + cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), cam_state.cam[0]); + cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), cam_state.cam[1]); + cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), cam_state.cam[2]); + cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), cam_state.cam[3]); + cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), cam_state.cam[4]); + cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), cam_state.cam[5]); + cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), cam_state.cam_mask); + + /* Restore packet I/O. */ + agl_gmx_prtx.s.en = prev_packet_enable; + cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64); + + spin_unlock_irqrestore(&p->lock, flags); +} + +static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr) +{ + struct sockaddr *sa = addr; + + if (!is_valid_ether_addr(sa->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN); + + octeon_mgmt_set_rx_filtering(netdev); + + return 0; +} + +static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + int port = p->port; + int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM; + + /* + * Limit the MTU to make sure the ethernet packets are between + * 64 bytes and 16383 bytes. + */ + if (size_without_fcs < 64 || size_without_fcs > 16383) { + dev_warn(p->dev, "MTU must be between %d and %d.\n", + 64 - OCTEON_MGMT_RX_HEADROOM, + 16383 - OCTEON_MGMT_RX_HEADROOM); + return -EINVAL; + } + + netdev->mtu = new_mtu; + + cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), size_without_fcs); + cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port), + (size_without_fcs + 7) & 0xfff8); + + return 0; +} + +static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id) +{ + struct net_device *netdev = dev_id; + struct octeon_mgmt *p = netdev_priv(netdev); + int port = p->port; + union cvmx_mixx_isr mixx_isr; + + mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port)); + + /* Clear any pending interrupts */ + cvmx_write_csr(CVMX_MIXX_ISR(port), + cvmx_read_csr(CVMX_MIXX_ISR(port))); + cvmx_read_csr(CVMX_MIXX_ISR(port)); + + if (mixx_isr.s.irthresh) { + octeon_mgmt_disable_rx_irq(p); + napi_schedule(&p->napi); + } + if (mixx_isr.s.orthresh) { + octeon_mgmt_disable_tx_irq(p); + tasklet_schedule(&p->tx_clean_tasklet); + } + + return IRQ_HANDLED; +} + +static int octeon_mgmt_ioctl(struct net_device *netdev, + struct ifreq *rq, int cmd) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + + if (!netif_running(netdev)) + return -EINVAL; + + if (!p->phydev) + return -EINVAL; + + return phy_mii_ioctl(p->phydev, if_mii(rq), cmd); +} + +static void octeon_mgmt_adjust_link(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + int port = p->port; + union cvmx_agl_gmx_prtx_cfg prtx_cfg; + unsigned long flags; + int link_changed = 0; + + spin_lock_irqsave(&p->lock, flags); + if (p->phydev->link) { + if (!p->last_link) + link_changed = 1; + if (p->last_duplex != p->phydev->duplex) { + p->last_duplex = p->phydev->duplex; + prtx_cfg.u64 = + cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port)); + prtx_cfg.s.duplex = p->phydev->duplex; + 
cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), + prtx_cfg.u64); + } + } else { + if (p->last_link) + link_changed = -1; + } + p->last_link = p->phydev->link; + spin_unlock_irqrestore(&p->lock, flags); + + if (link_changed != 0) { + if (link_changed > 0) { + netif_carrier_on(netdev); + pr_info("%s: Link is up - %d/%s\n", netdev->name, + p->phydev->speed, + DUPLEX_FULL == p->phydev->duplex ? + "Full" : "Half"); + } else { + netif_carrier_off(netdev); + pr_info("%s: Link is down\n", netdev->name); + } + } +} + +static int octeon_mgmt_init_phy(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + char phy_id[20]; + + if (octeon_is_simulation()) { + /* No PHYs in the simulator. */ + netif_carrier_on(netdev); + return 0; + } + + snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "0", p->port); + + p->phydev = phy_connect(netdev, phy_id, octeon_mgmt_adjust_link, 0, + PHY_INTERFACE_MODE_MII); + + if (IS_ERR(p->phydev)) { + p->phydev = NULL; + return -1; + } + + phy_start_aneg(p->phydev); + + return 0; +} + +static int octeon_mgmt_open(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + int port = p->port; + union cvmx_mixx_ctl mix_ctl; + union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode; + union cvmx_mixx_oring1 oring1; + union cvmx_mixx_iring1 iring1; + union cvmx_agl_gmx_prtx_cfg prtx_cfg; + union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl; + union cvmx_mixx_irhwm mix_irhwm; + union cvmx_mixx_orhwm mix_orhwm; + union cvmx_mixx_intena mix_intena; + struct sockaddr sa; + + /* Allocate ring buffers. */ + p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), + GFP_KERNEL); + if (!p->tx_ring) + return -ENOMEM; + p->tx_ring_handle = + dma_map_single(p->dev, p->tx_ring, + ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), + DMA_BIDIRECTIONAL); + p->tx_next = 0; + p->tx_next_clean = 0; + p->tx_current_fill = 0; + + + p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), + GFP_KERNEL); + if (!p->rx_ring) + goto err_nomem; + p->rx_ring_handle = + dma_map_single(p->dev, p->rx_ring, + ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), + DMA_BIDIRECTIONAL); + + p->rx_next = 0; + p->rx_next_fill = 0; + p->rx_current_fill = 0; + + octeon_mgmt_reset_hw(p); + + mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port)); + + /* Bring it out of reset if needed. */ + if (mix_ctl.s.reset) { + mix_ctl.s.reset = 0; + cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64); + do { + mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port)); + } while (mix_ctl.s.reset); + } + + agl_gmx_inf_mode.u64 = 0; + agl_gmx_inf_mode.s.en = 1; + cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64); + + oring1.u64 = 0; + oring1.s.obase = p->tx_ring_handle >> 3; + oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE; + cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64); + + iring1.u64 = 0; + iring1.s.ibase = p->rx_ring_handle >> 3; + iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE; + cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64); + + /* Disable packet I/O. */ + prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port)); + prtx_cfg.s.en = 0; + cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64); + + memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN); + octeon_mgmt_set_mac_address(netdev, &sa); + + octeon_mgmt_change_mtu(netdev, netdev->mtu); + + /* + * Enable the port HW. Packets are not allowed until + * cvmx_mgmt_port_enable() is called. 
+ */ + mix_ctl.u64 = 0; + mix_ctl.s.crc_strip = 1; /* Strip the ending CRC */ + mix_ctl.s.en = 1; /* Enable the port */ + mix_ctl.s.nbtarb = 0; /* Arbitration mode */ + /* MII CB-request FIFO programmable high watermark */ + mix_ctl.s.mrq_hwm = 1; + cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64); + + if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) + || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) { + /* + * Force compensation values, as they are not + * determined properly by HW + */ + union cvmx_agl_gmx_drv_ctl drv_ctl; + + drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL); + if (port) { + drv_ctl.s.byp_en1 = 1; + drv_ctl.s.nctl1 = 6; + drv_ctl.s.pctl1 = 6; + } else { + drv_ctl.s.byp_en = 1; + drv_ctl.s.nctl = 6; + drv_ctl.s.pctl = 6; + } + cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64); + } + + octeon_mgmt_rx_fill_ring(netdev); + + /* Clear statistics. */ + /* Clear on read. */ + cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port), 1); + cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port), 0); + cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port), 0); + + cvmx_write_csr(CVMX_AGL_GMX_TXX_STATS_CTL(port), 1); + cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT0(port), 0); + cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT1(port), 0); + + /* Clear any pending interrupts */ + cvmx_write_csr(CVMX_MIXX_ISR(port), cvmx_read_csr(CVMX_MIXX_ISR(port))); + + if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name, + netdev)) { + dev_err(p->dev, "request_irq(%d) failed.\n", p->irq); + goto err_noirq; + } + + /* Interrupt every single RX packet */ + mix_irhwm.u64 = 0; + mix_irhwm.s.irhwm = 0; + cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64); + + /* Interrupt when we have 5 or more packets to clean. */ + mix_orhwm.u64 = 0; + mix_orhwm.s.orhwm = 5; + cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64); + + /* Enable receive and transmit interrupts */ + mix_intena.u64 = 0; + mix_intena.s.ithena = 1; + mix_intena.s.othena = 1; + cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64); + + + /* Enable packet I/O. */ + + rxx_frm_ctl.u64 = 0; + rxx_frm_ctl.s.pre_align = 1; + /* + * When set, disables the length check for non-min sized pkts + * with padding in the client data. + */ + rxx_frm_ctl.s.pad_len = 1; + /* When set, disables the length check for VLAN pkts */ + rxx_frm_ctl.s.vlan_len = 1; + /* When set, PREAMBLE checking is less strict */ + rxx_frm_ctl.s.pre_free = 1; + /* Control Pause Frames can match station SMAC */ + rxx_frm_ctl.s.ctl_smac = 0; + /* Control Pause Frames can match globally assign Multicast address */ + rxx_frm_ctl.s.ctl_mcst = 1; + /* Forward pause information to TX block */ + rxx_frm_ctl.s.ctl_bck = 1; + /* Drop Control Pause Frames */ + rxx_frm_ctl.s.ctl_drp = 1; + /* Strip off the preamble */ + rxx_frm_ctl.s.pre_strp = 1; + /* + * This port is configured to send PREAMBLE+SFD to begin every + * frame. GMX checks that the PREAMBLE is sent correctly. 
+ */ + rxx_frm_ctl.s.pre_chk = 1; + cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64); + + /* Enable the AGL block */ + agl_gmx_inf_mode.u64 = 0; + agl_gmx_inf_mode.s.en = 1; + cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64); + + /* Configure the port duplex and enables */ + prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port)); + prtx_cfg.s.tx_en = 1; + prtx_cfg.s.rx_en = 1; + prtx_cfg.s.en = 1; + p->last_duplex = 1; + prtx_cfg.s.duplex = p->last_duplex; + cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64); + + p->last_link = 0; + netif_carrier_off(netdev); + + if (octeon_mgmt_init_phy(netdev)) { + dev_err(p->dev, "Cannot initialize PHY.\n"); + goto err_noirq; + } + + netif_wake_queue(netdev); + napi_enable(&p->napi); + + return 0; +err_noirq: + octeon_mgmt_reset_hw(p); + dma_unmap_single(p->dev, p->rx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), + DMA_BIDIRECTIONAL); + kfree(p->rx_ring); +err_nomem: + dma_unmap_single(p->dev, p->tx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), + DMA_BIDIRECTIONAL); + kfree(p->tx_ring); + return -ENOMEM; +} + +static int octeon_mgmt_stop(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + + napi_disable(&p->napi); + netif_stop_queue(netdev); + + if (p->phydev) + phy_disconnect(p->phydev); + + netif_carrier_off(netdev); + + octeon_mgmt_reset_hw(p); + + + free_irq(p->irq, netdev); + + /* dma_unmap is a nop on Octeon, so just free everything. */ + skb_queue_purge(&p->tx_list); + skb_queue_purge(&p->rx_list); + + dma_unmap_single(p->dev, p->rx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), + DMA_BIDIRECTIONAL); + kfree(p->rx_ring); + + dma_unmap_single(p->dev, p->tx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), + DMA_BIDIRECTIONAL); + kfree(p->tx_ring); + + + return 0; +} + +static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + int port = p->port; + union mgmt_port_ring_entry re; + unsigned long flags; + + re.d64 = 0; + re.s.len = skb->len; + re.s.addr = dma_map_single(p->dev, skb->data, + skb->len, + DMA_TO_DEVICE); + + spin_lock_irqsave(&p->tx_list.lock, flags); + + if (unlikely(p->tx_current_fill >= + ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) { + spin_unlock_irqrestore(&p->tx_list.lock, flags); + + dma_unmap_single(p->dev, re.s.addr, re.s.len, + DMA_TO_DEVICE); + + netif_stop_queue(netdev); + return NETDEV_TX_BUSY; + } + + __skb_queue_tail(&p->tx_list, skb); + + /* Put it in the ring. */ + p->tx_ring[p->tx_next] = re.d64; + p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE; + p->tx_current_fill++; + + spin_unlock_irqrestore(&p->tx_list.lock, flags); + + dma_sync_single_for_device(p->dev, p->tx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), + DMA_BIDIRECTIONAL); + + netdev->stats.tx_packets++; + netdev->stats.tx_bytes += skb->len; + + /* Ring the bell. 
*/ + cvmx_write_csr(CVMX_MIXX_ORING2(port), 1); + + netdev->trans_start = jiffies; + octeon_mgmt_clean_tx_buffers(p); + octeon_mgmt_update_tx_stats(netdev); + return NETDEV_TX_OK; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void octeon_mgmt_poll_controller(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + + octeon_mgmt_receive_packets(p, 16); + octeon_mgmt_update_rx_stats(netdev); + return; +} +#endif + +static void octeon_mgmt_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + strncpy(info->driver, DRV_NAME, sizeof(info->driver)); + strncpy(info->version, DRV_VERSION, sizeof(info->version)); + strncpy(info->fw_version, "N/A", sizeof(info->fw_version)); + strncpy(info->bus_info, "N/A", sizeof(info->bus_info)); + info->n_stats = 0; + info->testinfo_len = 0; + info->regdump_len = 0; + info->eedump_len = 0; +} + +static int octeon_mgmt_get_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + + if (p->phydev) + return phy_ethtool_gset(p->phydev, cmd); + + return -EINVAL; +} + +static int octeon_mgmt_set_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (p->phydev) + return phy_ethtool_sset(p->phydev, cmd); + + return -EINVAL; +} + +static const struct ethtool_ops octeon_mgmt_ethtool_ops = { + .get_drvinfo = octeon_mgmt_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_settings = octeon_mgmt_get_settings, + .set_settings = octeon_mgmt_set_settings +}; + +static const struct net_device_ops octeon_mgmt_ops = { + .ndo_open = octeon_mgmt_open, + .ndo_stop = octeon_mgmt_stop, + .ndo_start_xmit = octeon_mgmt_xmit, + .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering, + .ndo_set_multicast_list = octeon_mgmt_set_rx_filtering, + .ndo_set_mac_address = octeon_mgmt_set_mac_address, + .ndo_do_ioctl = octeon_mgmt_ioctl, + .ndo_change_mtu = octeon_mgmt_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = octeon_mgmt_poll_controller, +#endif +}; + +static int __init octeon_mgmt_probe(struct platform_device *pdev) +{ + struct resource *res_irq; + struct net_device *netdev; + struct octeon_mgmt *p; + int i; + + netdev = alloc_etherdev(sizeof(struct octeon_mgmt)); + if (netdev == NULL) + return -ENOMEM; + + dev_set_drvdata(&pdev->dev, netdev); + p = netdev_priv(netdev); + netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll, + OCTEON_MGMT_NAPI_WEIGHT); + + p->netdev = netdev; + p->dev = &pdev->dev; + + p->port = pdev->id; + snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port); + + res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res_irq) + goto err; + + p->irq = res_irq->start; + spin_lock_init(&p->lock); + + skb_queue_head_init(&p->tx_list); + skb_queue_head_init(&p->rx_list); + tasklet_init(&p->tx_clean_tasklet, + octeon_mgmt_clean_tx_tasklet, (unsigned long)p); + + netdev->netdev_ops = &octeon_mgmt_ops; + netdev->ethtool_ops = &octeon_mgmt_ethtool_ops; + + + /* The mgmt ports get the first N MACs. 
*/ + for (i = 0; i < 6; i++) + netdev->dev_addr[i] = octeon_bootinfo->mac_addr_base[i]; + netdev->dev_addr[5] += p->port; + + if (p->port >= octeon_bootinfo->mac_addr_count) + dev_err(&pdev->dev, + "Error %s: Using MAC outside of the assigned range: " + "%02x:%02x:%02x:%02x:%02x:%02x\n", netdev->name, + netdev->dev_addr[0], netdev->dev_addr[1], + netdev->dev_addr[2], netdev->dev_addr[3], + netdev->dev_addr[4], netdev->dev_addr[5]); + + if (register_netdev(netdev)) + goto err; + + dev_info(&pdev->dev, "Version " DRV_VERSION "\n"); + return 0; +err: + free_netdev(netdev); + return -ENOENT; +} + +static int __exit octeon_mgmt_remove(struct platform_device *pdev) +{ + struct net_device *netdev = dev_get_drvdata(&pdev->dev); + + unregister_netdev(netdev); + free_netdev(netdev); + return 0; +} + +static struct platform_driver octeon_mgmt_driver = { + .driver = { + .name = "octeon_mgmt", + .owner = THIS_MODULE, + }, + .probe = octeon_mgmt_probe, + .remove = __exit_p(octeon_mgmt_remove), +}; + +extern void octeon_mdiobus_force_mod_depencency(void); + +static int __init octeon_mgmt_mod_init(void) +{ + /* Force our mdiobus driver module to be loaded first. */ + octeon_mdiobus_force_mod_depencency(); + return platform_driver_register(&octeon_mgmt_driver); +} + +static void __exit octeon_mgmt_mod_exit(void) +{ + platform_driver_unregister(&octeon_mgmt_driver); +} + +module_init(octeon_mgmt_mod_init); +module_exit(octeon_mgmt_mod_exit); + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR("David Daney"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c index 81bafd57847..d431b59e7d1 100644 --- a/drivers/net/pcmcia/axnet_cs.c +++ b/drivers/net/pcmcia/axnet_cs.c @@ -270,7 +270,7 @@ static int try_io_port(struct pcmcia_device *link) /* for master/slave multifunction cards */ link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; link->irq.Attributes = - IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; + IRQ_TYPE_DYNAMIC_SHARING; } } else { /* This should be two 16-port windows */ diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c index 8ad8384fc1c..813aca3fc43 100644 --- a/drivers/net/pcmcia/fmvj18x_cs.c +++ b/drivers/net/pcmcia/fmvj18x_cs.c @@ -426,7 +426,7 @@ static int fmvj18x_config(struct pcmcia_device *link) if (link->io.NumPorts2 != 0) { link->irq.Attributes = - IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; + IRQ_TYPE_DYNAMIC_SHARING; ret = mfc_try_io_port(link); if (ret != 0) goto failed; } else if (cardtype == UNGERMANN) { diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index c2651aecbda..776cad2f571 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c @@ -490,7 +490,7 @@ static int try_io_port(struct pcmcia_device *link) /* for master/slave multifunction cards */ link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; link->irq.Attributes = - IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; + IRQ_TYPE_DYNAMIC_SHARING; } } else { /* This should be two 16-port windows */ diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c index cc4853bc025..6dd486d2977 100644 --- a/drivers/net/pcmcia/smc91c92_cs.c +++ b/drivers/net/pcmcia/smc91c92_cs.c @@ -454,7 +454,7 @@ static int mhz_mfc_config(struct pcmcia_device *link) link->conf.Attributes |= CONF_ENABLE_SPKR; link->conf.Status = CCSR_AUDIO_ENA; link->irq.Attributes = - IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; + IRQ_TYPE_DYNAMIC_SHARING; link->io.IOAddrLines = 16; link->io.Attributes2 = 
IO_DATA_PATH_WIDTH_8; link->io.NumPorts2 = 8; diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c index a2eda28f903..466fc72698c 100644 --- a/drivers/net/pcmcia/xirc2ps_cs.c +++ b/drivers/net/pcmcia/xirc2ps_cs.c @@ -841,7 +841,7 @@ xirc2ps_config(struct pcmcia_device * link) link->conf.Attributes |= CONF_ENABLE_SPKR; link->conf.Status |= CCSR_AUDIO_ENA; } - link->irq.Attributes |= IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED ; + link->irq.Attributes |= IRQ_TYPE_DYNAMIC_SHARING; link->io.NumPorts2 = 8; link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; if (local->dingo) { diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index d5d8e1c5bc9..fc5938ba3d7 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -115,4 +115,15 @@ config MDIO_GPIO To compile this driver as a module, choose M here: the module will be called mdio-gpio. +config MDIO_OCTEON + tristate "Support for MDIO buses on Octeon SOCs" + depends on CPU_CAVIUM_OCTEON + default y + help + + This module provides a driver for the Octeon MDIO busses. + It is required by the Octeon Ethernet device drivers. + + If in doubt, say Y. + endif # PHYLIB diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index edfaac48cbd..1342585af38 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -20,3 +20,4 @@ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o obj-$(CONFIG_NATIONAL_PHY) += national.o obj-$(CONFIG_STE10XP) += ste10Xp.o +obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c new file mode 100644 index 00000000000..61a4461cbda --- /dev/null +++ b/drivers/net/phy/mdio-octeon.c @@ -0,0 +1,180 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2009 Cavium Networks + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/phy.h> + +#include <asm/octeon/octeon.h> +#include <asm/octeon/cvmx-smix-defs.h> + +#define DRV_VERSION "1.0" +#define DRV_DESCRIPTION "Cavium Networks Octeon SMI/MDIO driver" + +struct octeon_mdiobus { + struct mii_bus *mii_bus; + int unit; + int phy_irq[PHY_MAX_ADDR]; +}; + +static int octeon_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum) +{ + struct octeon_mdiobus *p = bus->priv; + union cvmx_smix_cmd smi_cmd; + union cvmx_smix_rd_dat smi_rd; + int timeout = 1000; + + smi_cmd.u64 = 0; + smi_cmd.s.phy_op = 1; /* MDIO_CLAUSE_22_READ */ + smi_cmd.s.phy_adr = phy_id; + smi_cmd.s.reg_adr = regnum; + cvmx_write_csr(CVMX_SMIX_CMD(p->unit), smi_cmd.u64); + + do { + /* + * Wait 1000 clocks so we don't saturate the RSL bus + * doing reads. 
+ */ + cvmx_wait(1000); + smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(p->unit)); + } while (smi_rd.s.pending && --timeout); + + if (smi_rd.s.val) + return smi_rd.s.dat; + else + return -EIO; +} + +static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id, + int regnum, u16 val) +{ + struct octeon_mdiobus *p = bus->priv; + union cvmx_smix_cmd smi_cmd; + union cvmx_smix_wr_dat smi_wr; + int timeout = 1000; + + smi_wr.u64 = 0; + smi_wr.s.dat = val; + cvmx_write_csr(CVMX_SMIX_WR_DAT(p->unit), smi_wr.u64); + + smi_cmd.u64 = 0; + smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_22_WRITE */ + smi_cmd.s.phy_adr = phy_id; + smi_cmd.s.reg_adr = regnum; + cvmx_write_csr(CVMX_SMIX_CMD(p->unit), smi_cmd.u64); + + do { + /* + * Wait 1000 clocks so we don't saturate the RSL bus + * doing reads. + */ + cvmx_wait(1000); + smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(p->unit)); + } while (smi_wr.s.pending && --timeout); + + if (timeout <= 0) + return -EIO; + + return 0; +} + +static int __init octeon_mdiobus_probe(struct platform_device *pdev) +{ + struct octeon_mdiobus *bus; + int i; + int err = -ENOENT; + + bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL); + if (!bus) + return -ENOMEM; + + /* The platform_device id is our unit number. */ + bus->unit = pdev->id; + + bus->mii_bus = mdiobus_alloc(); + + if (!bus->mii_bus) + goto err; + + /* + * Standard Octeon evaluation boards don't support phy + * interrupts, we need to poll. + */ + for (i = 0; i < PHY_MAX_ADDR; i++) + bus->phy_irq[i] = PHY_POLL; + + bus->mii_bus->priv = bus; + bus->mii_bus->irq = bus->phy_irq; + bus->mii_bus->name = "mdio-octeon"; + snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%x", bus->unit); + bus->mii_bus->parent = &pdev->dev; + + bus->mii_bus->read = octeon_mdiobus_read; + bus->mii_bus->write = octeon_mdiobus_write; + + dev_set_drvdata(&pdev->dev, bus); + + err = mdiobus_register(bus->mii_bus); + if (err) + goto err_register; + + dev_info(&pdev->dev, "Version " DRV_VERSION "\n"); + + return 0; +err_register: + mdiobus_free(bus->mii_bus); + +err: + devm_kfree(&pdev->dev, bus); + return err; +} + +static int __exit octeon_mdiobus_remove(struct platform_device *pdev) +{ + struct octeon_mdiobus *bus; + + bus = dev_get_drvdata(&pdev->dev); + + mdiobus_unregister(bus->mii_bus); + mdiobus_free(bus->mii_bus); + return 0; +} + +static struct platform_driver octeon_mdiobus_driver = { + .driver = { + .name = "mdio-octeon", + .owner = THIS_MODULE, + }, + .probe = octeon_mdiobus_probe, + .remove = __exit_p(octeon_mdiobus_remove), +}; + +void octeon_mdiobus_force_mod_depencency(void) +{ + /* Let ethernet drivers force us to be loaded. 
*/ +} +EXPORT_SYMBOL(octeon_mdiobus_force_mod_depencency); + +static int __init octeon_mdiobus_mod_init(void) +{ + return platform_driver_register(&octeon_mdiobus_driver); +} + +static void __exit octeon_mdiobus_mod_exit(void) +{ + platform_driver_unregister(&octeon_mdiobus_driver); +} + +module_init(octeon_mdiobus_mod_init); +module_exit(octeon_mdiobus_mod_exit); + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_VERSION(DRV_VERSION); +MODULE_AUTHOR("David Daney"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index acfc5a3aa49..60f96c468a2 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -4859,7 +4859,7 @@ out: return 0; } -static struct dev_pm_ops rtl8169_pm_ops = { +static const struct dev_pm_ops rtl8169_pm_ops = { .suspend = rtl8169_suspend, .resume = rtl8169_resume, .freeze = rtl8169_suspend, diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 7650f739a81..37f486b65f6 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -4685,6 +4685,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev, INIT_WORK(&hw->restart_work, sky2_restart); pci_set_drvdata(pdev, hw); + pdev->d3_delay = 150; return 0; diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index 7815bfc300f..54799544bda 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h @@ -206,21 +206,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg) } } -#elif defined(CONFIG_ARCH_OMAP) - -/* We can only do 16-bit reads and writes in the static memory space. */ -#define SMC_CAN_USE_8BIT 0 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 0 -#define SMC_IO_SHIFT 0 -#define SMC_NOWAIT 1 - -#define SMC_inw(a, r) readw((a) + (r)) -#define SMC_outw(v, a, r) writew(v, (a) + (r)) -#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) -#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) -#define SMC_IRQ_FLAGS (-1) /* from resource */ - #elif defined(CONFIG_SH_SH4202_MICRODEV) #define SMC_CAN_USE_8BIT 0 diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index 20d6095cf41..494cd91ea39 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c @@ -2154,7 +2154,7 @@ static int smsc911x_resume(struct device *dev) return (to == 0) ? 
-EIO : 0; } -static struct dev_pm_ops smsc911x_pm_ops = { +static const struct dev_pm_ops smsc911x_pm_ops = { .suspend = smsc911x_suspend, .resume = smsc911x_resume, }; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 63099c58a6d..3a15de56df9 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -153,15 +153,14 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) struct net_device *rcv = NULL; struct veth_priv *priv, *rcv_priv; struct veth_net_stats *stats, *rcv_stats; - int length, cpu; + int length; priv = netdev_priv(dev); rcv = priv->peer; rcv_priv = netdev_priv(rcv); - cpu = smp_processor_id(); - stats = per_cpu_ptr(priv->stats, cpu); - rcv_stats = per_cpu_ptr(rcv_priv->stats, cpu); + stats = this_cpu_ptr(priv->stats); + rcv_stats = this_cpu_ptr(rcv_priv->stats); if (!(rcv->flags & IFF_UP)) goto tx_drop; diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 1ceb9d0f8b9..9cc438282d7 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -2689,7 +2689,7 @@ vmxnet3_resume(struct device *device) return 0; } -static struct dev_pm_ops vmxnet3_pm_ops = { +static const struct dev_pm_ops vmxnet3_pm_ops = { .suspend = vmxnet3_suspend, .resume = vmxnet3_resume, }; diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h index 675b7df632f..27ca859e745 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.h +++ b/drivers/net/wireless/iwlwifi/iwl-core.h @@ -63,7 +63,7 @@ #ifndef __iwl_core_h__ #define __iwl_core_h__ -#include <linux/utsrelease.h> +#include <generated/utsrelease.h> /************************ * forward declarations * diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c index b9b371bfa30..42611bea76a 100644 --- a/drivers/net/wireless/libertas/cmd.c +++ b/drivers/net/wireless/libertas/cmd.c @@ -1365,7 +1365,7 @@ static void lbs_send_confirmsleep(struct lbs_private *priv) priv->dnld_sent = DNLD_RES_RECEIVED; /* If nothing to do, go back to sleep (?) */ - if (!__kfifo_len(priv->event_fifo) && !priv->resp_len[priv->resp_idx]) + if (!kfifo_len(&priv->event_fifo) && !priv->resp_len[priv->resp_idx]) priv->psstate = PS_STATE_SLEEP; spin_unlock_irqrestore(&priv->driver_lock, flags); @@ -1439,7 +1439,7 @@ void lbs_ps_confirm_sleep(struct lbs_private *priv) } /* Pending events or command responses? 
*/ - if (__kfifo_len(priv->event_fifo) || priv->resp_len[priv->resp_idx]) { + if (kfifo_len(&priv->event_fifo) || priv->resp_len[priv->resp_idx]) { allowed = 0; lbs_deb_host("pending events or command responses\n"); } diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h index 6a8d2b291d8..05bb298dfae 100644 --- a/drivers/net/wireless/libertas/dev.h +++ b/drivers/net/wireless/libertas/dev.h @@ -10,7 +10,7 @@ #include "scan.h" #include "assoc.h" - +#include <linux/kfifo.h> /** sleep_params */ struct sleep_params { @@ -120,7 +120,7 @@ struct lbs_private { u32 resp_len[2]; /* Events sent from hardware to driver */ - struct kfifo *event_fifo; + struct kfifo event_fifo; /** thread to service interrupts */ struct task_struct *main_thread; diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index db38a5a719f..c2975c8e2f2 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c @@ -459,7 +459,7 @@ static int lbs_thread(void *data) else if (!list_empty(&priv->cmdpendingq) && !(priv->wakeup_dev_required)) shouldsleep = 0; /* We have a command to send */ - else if (__kfifo_len(priv->event_fifo)) + else if (kfifo_len(&priv->event_fifo)) shouldsleep = 0; /* We have an event to process */ else shouldsleep = 1; /* No command */ @@ -511,10 +511,13 @@ static int lbs_thread(void *data) /* Process hardware events, e.g. card removed, link lost */ spin_lock_irq(&priv->driver_lock); - while (__kfifo_len(priv->event_fifo)) { + while (kfifo_len(&priv->event_fifo)) { u32 event; - __kfifo_get(priv->event_fifo, (unsigned char *) &event, - sizeof(event)); + + if (kfifo_out(&priv->event_fifo, + (unsigned char *) &event, sizeof(event)) != + sizeof(event)) + break; spin_unlock_irq(&priv->driver_lock); lbs_process_event(priv, event); spin_lock_irq(&priv->driver_lock); @@ -883,10 +886,9 @@ static int lbs_init_adapter(struct lbs_private *priv) priv->resp_len[0] = priv->resp_len[1] = 0; /* Create the event FIFO */ - priv->event_fifo = kfifo_alloc(sizeof(u32) * 16, GFP_KERNEL, NULL); - if (IS_ERR(priv->event_fifo)) { + ret = kfifo_alloc(&priv->event_fifo, sizeof(u32) * 16, GFP_KERNEL); + if (ret) { lbs_pr_err("Out of memory allocating event FIFO buffer\n"); - ret = -ENOMEM; goto out; } @@ -901,8 +903,7 @@ static void lbs_free_adapter(struct lbs_private *priv) lbs_deb_enter(LBS_DEB_MAIN); lbs_free_cmd_buffer(priv); - if (priv->event_fifo) - kfifo_free(priv->event_fifo); + kfifo_free(&priv->event_fifo); del_timer(&priv->command_timer); del_timer(&priv->auto_deepsleep_timer); kfree(priv->networks); @@ -1177,7 +1178,7 @@ void lbs_queue_event(struct lbs_private *priv, u32 event) if (priv->psstate == PS_STATE_SLEEP) priv->psstate = PS_STATE_AWAKE; - __kfifo_put(priv->event_fifo, (unsigned char *) &event, sizeof(u32)); + kfifo_in(&priv->event_fifo, (unsigned char *) &event, sizeof(u32)); wake_up_interruptible(&priv->waitq); |