/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2009 Cavium Networks
*/
#include <linux/capability.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/phy.h>
#include <linux/spinlock.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-mixx-defs.h>
#include <asm/octeon/cvmx-agl-defs.h>

#define DRV_NAME "octeon_mgmt"
#define DRV_VERSION "2.0"
#define DRV_DESCRIPTION \
	"Cavium Networks Octeon MII (management) port Network Driver"

/* Maximum number of RX packets processed per NAPI poll pass. */
#define OCTEON_MGMT_NAPI_WEIGHT 16

/*
 * Ring sizes that are powers of two allow for more efficient modulo
 * operations.
 */
#define OCTEON_MGMT_RX_RING_SIZE 512
#define OCTEON_MGMT_TX_RING_SIZE 128
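
/*
 * Illustrative sketch: with a power-of-two size, wrapping a ring index
 * needs only a mask rather than a divide, e.g.
 *
 *	next = (cur + 1) & (OCTEON_MGMT_RX_RING_SIZE - 1);
 *
 * which is equivalent to (cur + 1) % OCTEON_MGMT_RX_RING_SIZE; the
 * compiler applies the same reduction automatically whenever the
 * divisor is a known power of two.
 */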

/*
 * Headroom for the Ethernet header plus 8 bytes for the VLAN tag and
 * FCS (14 + 4 + 4 = 22 bytes).
 */
#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)

union mgmt_port_ring_entry {
	u64 d64;
	struct {
		u64 reserved_62_63:2;
		/* Length of the buffer/packet in bytes */
		u64 len:14;
		/* For TX, signals that the packet should be timestamped */
		u64 tstamp:1;
		/* The RX error code */
		u64 code:7;
#define RING_ENTRY_CODE_DONE 0xf
#define RING_ENTRY_CODE_MORE 0x10
		/* Physical address of the buffer */
		u64 addr:40;
	} s;
};
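
/*
 * Illustrative sketch (the 'size' and 'skb' names are placeholders,
 * not part of this driver) of how an RX entry is typically handed to
 * the hardware, which writes 'code' and 'len' back on completion:
 *
 *	union mgmt_port_ring_entry re;
 *
 *	re.d64 = 0;
 *	re.s.len = size;
 *	re.s.addr = dma_map_single(p->dev, skb->data, size,
 *				   DMA_FROM_DEVICE);
 *	p->rx_ring[p->rx_next_fill] = re.d64;
 */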

struct octeon_mgmt {
	struct net_device *netdev;
	int port;
	int irq;
	u64 *tx_ring;
	dma_addr_t tx_ring_handle;
	unsigned int tx_next;
	unsigned int tx_next_clean;
	unsigned int tx_current_fill;
	/* The tx_list lock also protects the ring related variables */
	struct sk_buff_head tx_list;
	/* RX variables only touched in napi_poll. No locking necessary. */
	u64 *rx_ring;
	dma_addr_t rx_ring_handle;
	unsigned int rx_next;
	unsigned int rx_next_fill;
	unsigned int rx_current_fill;
	struct sk_buff_head rx_list;
	spinlock_t lock;
	unsigned int last_duplex;
	unsigned int last_link;
	struct device *dev;
	struct napi_struct napi;
	struct tasklet_struct tx_clean_tasklet;
	struct phy_device *phydev;
};
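
/*
 * Illustrative locking sketch, per the tx_list comment above: TX ring
 * state (tx_next, tx_current_fill, ...) is updated while holding the
 * tx_list queue lock, e.g.
 *
 *	spin_lock_irqsave(&p->tx_list.lock, flags);
 *	__skb_queue_tail(&p->tx_list, skb);
 *	p->tx_current_fill++;
 *	spin_unlock_irqrestore(&p->tx_list.lock, flags);
 */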

/*
 * The RX (ithena) and TX (othena) threshold-interrupt enables live in
 * the same MIX_INTENA register, so the read-modify-write sequences
 * below are serialized with p->lock.
 */
static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
{
	int port = p->port;
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
	mix_intena.s.ithena = enable ? 1 : 0;
	cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
{
	int port = p->port;
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
	mix_intena.s.othena = enable ? 1 : 0;
	cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static inline void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 1);
}

static inline void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 0);
}

static inline void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 1);
}
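
/*
 * Illustrative pairing of these helpers with NAPI (a hypothetical
 * poll handler fragment, shown only to sketch the pattern, not this
 * driver's code): the RX interrupt stays masked while polling and is
 * re-enabled once the budget is no longer exhausted:
 *
 *	if (work_done < budget) {
 *		napi_complete(napi);
 *		octeon_mgmt_enable_rx_irq(p);
 *	}
 */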