Diffstat (limited to 'drivers/net/ethernet/altera')
-rw-r--r--  drivers/net/ethernet/altera/Kconfig                 |    9
-rw-r--r--  drivers/net/ethernet/altera/Makefile                |    8
-rw-r--r--  drivers/net/ethernet/altera/altera_msgdma.c         |  206
-rw-r--r--  drivers/net/ethernet/altera/altera_msgdma.h         |   35
-rw-r--r--  drivers/net/ethernet/altera/altera_msgdmahw.h       |  162
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdma.c          |  540
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdma.h          |   36
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdmahw.h        |  126
-rw-r--r--  drivers/net/ethernet/altera/altera_tse.h            |  537
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_ethtool.c    |  275
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c       | 1577
-rw-r--r--  drivers/net/ethernet/altera/altera_utils.c          |   44
-rw-r--r--  drivers/net/ethernet/altera/altera_utils.h          |   27
13 files changed, 3582 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/altera/Kconfig b/drivers/net/ethernet/altera/Kconfig new file mode 100644 index 00000000000..fdddba51473 --- /dev/null +++ b/drivers/net/ethernet/altera/Kconfig @@ -0,0 +1,9 @@ +config ALTERA_TSE +	tristate "Altera Triple-Speed Ethernet MAC support" +	depends on HAS_DMA +	select PHYLIB +	---help--- +	  This driver supports the Altera Triple-Speed (TSE) Ethernet MAC. + +	  To compile this driver as a module, choose M here. The module +	  will be called alteratse. diff --git a/drivers/net/ethernet/altera/Makefile b/drivers/net/ethernet/altera/Makefile new file mode 100644 index 00000000000..3eff2fd3997 --- /dev/null +++ b/drivers/net/ethernet/altera/Makefile @@ -0,0 +1,8 @@ +# +# Makefile for the Altera device drivers. +# + +obj-$(CONFIG_ALTERA_TSE) += altera_tse.o +altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \ +altera_msgdma.o altera_sgdma.o altera_utils.o +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c new file mode 100644 index 00000000000..0fb986ba329 --- /dev/null +++ b/drivers/net/ethernet/altera/altera_msgdma.c @@ -0,0 +1,206 @@ +/* Altera TSE SGDMA and MSGDMA Linux driver + * Copyright (C) 2014 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program.  If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/netdevice.h> +#include "altera_utils.h" +#include "altera_tse.h" +#include "altera_msgdmahw.h" +#include "altera_msgdma.h" + +/* No initialization work to do for MSGDMA */ +int msgdma_initialize(struct altera_tse_private *priv) +{ +	return 0; +} + +void msgdma_uninitialize(struct altera_tse_private *priv) +{ +} + +void msgdma_start_rxdma(struct altera_tse_private *priv) +{ +} + +void msgdma_reset(struct altera_tse_private *priv) +{ +	int counter; + +	/* Reset Rx mSGDMA */ +	csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, +		msgdma_csroffs(status)); +	csrwr32(MSGDMA_CSR_CTL_RESET, priv->rx_dma_csr, +		msgdma_csroffs(control)); + +	counter = 0; +	while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { +		if (tse_bit_is_clear(priv->rx_dma_csr, msgdma_csroffs(status), +				     MSGDMA_CSR_STAT_RESETTING)) +			break; +		udelay(1); +	} + +	if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) +		netif_warn(priv, drv, priv->dev, +			   "TSE Rx mSGDMA resetting bit never cleared!\n"); + +	/* clear all status bits */ +	csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, msgdma_csroffs(status)); + +	/* Reset Tx mSGDMA */ +	csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, +		msgdma_csroffs(status)); + +	csrwr32(MSGDMA_CSR_CTL_RESET, priv->tx_dma_csr, +		msgdma_csroffs(control)); + +	counter = 0; +	while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { +		if (tse_bit_is_clear(priv->tx_dma_csr, msgdma_csroffs(status), +				     MSGDMA_CSR_STAT_RESETTING)) +			break; +		udelay(1); +	} + +	if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) +		netif_warn(priv, drv, priv->dev, +			   "TSE Tx mSGDMA resetting bit never cleared!\n"); + +	/* clear all status bits */ +	csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, msgdma_csroffs(status)); +} + +void msgdma_disable_rxirq(struct altera_tse_private *priv) +{ +	tse_clear_bit(priv->rx_dma_csr, msgdma_csroffs(control), +		      MSGDMA_CSR_CTL_GLOBAL_INTR); +} + +void msgdma_enable_rxirq(struct altera_tse_private *priv) +{ +	tse_set_bit(priv->rx_dma_csr, msgdma_csroffs(control), +		    MSGDMA_CSR_CTL_GLOBAL_INTR); +} + +void msgdma_disable_txirq(struct altera_tse_private *priv) +{ +	tse_clear_bit(priv->tx_dma_csr, msgdma_csroffs(control), +		      MSGDMA_CSR_CTL_GLOBAL_INTR); +} + +void msgdma_enable_txirq(struct altera_tse_private *priv) +{ +	tse_set_bit(priv->tx_dma_csr, msgdma_csroffs(control), +		    MSGDMA_CSR_CTL_GLOBAL_INTR); +} + +void msgdma_clear_rxirq(struct altera_tse_private *priv) +{ +	csrwr32(MSGDMA_CSR_STAT_IRQ, priv->rx_dma_csr, msgdma_csroffs(status)); +} + +void msgdma_clear_txirq(struct altera_tse_private *priv) +{ +	csrwr32(MSGDMA_CSR_STAT_IRQ, priv->tx_dma_csr, msgdma_csroffs(status)); +} + +/* return 0 to indicate transmit is pending */ +int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) +{ +	csrwr32(lower_32_bits(buffer->dma_addr), priv->tx_dma_desc, +		msgdma_descroffs(read_addr_lo)); +	csrwr32(upper_32_bits(buffer->dma_addr), priv->tx_dma_desc, +		msgdma_descroffs(read_addr_hi)); +	csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_lo)); +	csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_hi)); +	csrwr32(buffer->len, priv->tx_dma_desc, msgdma_descroffs(len)); +	csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(burst_seq_num)); +	csrwr32(MSGDMA_DESC_TX_STRIDE, priv->tx_dma_desc, +		msgdma_descroffs(stride)); +	csrwr32(MSGDMA_DESC_CTL_TX_SINGLE, priv->tx_dma_desc, +		msgdma_descroffs(control)); +	return 0; +} + +u32 msgdma_tx_completions(struct altera_tse_private *priv) +{ +	u32 ready 
= 0; +	u32 inuse; +	u32 status; + +	/* Get number of sent descriptors */ +	inuse = csrrd32(priv->tx_dma_csr, msgdma_csroffs(rw_fill_level)) +			& 0xffff; + +	if (inuse) { /* Tx FIFO is not empty */ +		ready = priv->tx_prod - priv->tx_cons - inuse - 1; +	} else { +		/* Check for buffered last packet */ +		status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status)); +		if (status & MSGDMA_CSR_STAT_BUSY) +			ready = priv->tx_prod - priv->tx_cons - 1; +		else +			ready = priv->tx_prod - priv->tx_cons; +	} +	return ready; +} + +/* Put buffer to the mSGDMA RX FIFO + */ +void msgdma_add_rx_desc(struct altera_tse_private *priv, +			struct tse_buffer *rxbuffer) +{ +	u32 len = priv->rx_dma_buf_sz; +	dma_addr_t dma_addr = rxbuffer->dma_addr; +	u32 control = (MSGDMA_DESC_CTL_END_ON_EOP +			| MSGDMA_DESC_CTL_END_ON_LEN +			| MSGDMA_DESC_CTL_TR_COMP_IRQ +			| MSGDMA_DESC_CTL_EARLY_IRQ +			| MSGDMA_DESC_CTL_TR_ERR_IRQ +			| MSGDMA_DESC_CTL_GO); + +	csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_lo)); +	csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_hi)); +	csrwr32(lower_32_bits(dma_addr), priv->rx_dma_desc, +		msgdma_descroffs(write_addr_lo)); +	csrwr32(upper_32_bits(dma_addr), priv->rx_dma_desc, +		msgdma_descroffs(write_addr_hi)); +	csrwr32(len, priv->rx_dma_desc, msgdma_descroffs(len)); +	csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(burst_seq_num)); +	csrwr32(0x00010001, priv->rx_dma_desc, msgdma_descroffs(stride)); +	csrwr32(control, priv->rx_dma_desc, msgdma_descroffs(control)); +} + +/* status is returned on upper 16 bits, + * length is returned in lower 16 bits + */ +u32 msgdma_rx_status(struct altera_tse_private *priv) +{ +	u32 rxstatus = 0; +	u32 pktlength; +	u32 pktstatus; + +	if (csrrd32(priv->rx_dma_csr, msgdma_csroffs(resp_fill_level)) +	    & 0xffff) { +		pktlength = csrrd32(priv->rx_dma_resp, +				    msgdma_respoffs(bytes_transferred)); +		pktstatus = csrrd32(priv->rx_dma_resp, +				    msgdma_respoffs(status)); +		rxstatus = pktstatus; +		rxstatus = rxstatus << 16; +		rxstatus |= (pktlength & 0xffff); +	} +	return rxstatus; +} diff --git a/drivers/net/ethernet/altera/altera_msgdma.h b/drivers/net/ethernet/altera/altera_msgdma.h new file mode 100644 index 00000000000..42cf61c8105 --- /dev/null +++ b/drivers/net/ethernet/altera/altera_msgdma.h @@ -0,0 +1,35 @@ +/* Altera TSE SGDMA and MSGDMA Linux driver + * Copyright (C) 2014 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program.  If not, see <http://www.gnu.org/licenses/>. 
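The 16/16 status and length packing produced by msgdma_rx_status() above is unpacked again by the receive path in altera_tse_main.c. A minimal illustrative sketch of that consumer side (not literal patch code; the function name is hypothetical):

static void example_unpack_rx_status(struct altera_tse_private *priv)
{
	u32 rxstatus = msgdma_rx_status(priv);
	u16 pktstatus = rxstatus >> 16;		/* status/error bits */
	u16 pktlength = rxstatus & 0xffff;	/* frame length in bytes */

	netif_info(priv, rx_status, priv->dev, "rx status 0x%04x len %u\n",
		   pktstatus, pktlength);
}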
+ */ + +#ifndef __ALTERA_MSGDMA_H__ +#define __ALTERA_MSGDMA_H__ + +void msgdma_reset(struct altera_tse_private *); +void msgdma_enable_txirq(struct altera_tse_private *); +void msgdma_enable_rxirq(struct altera_tse_private *); +void msgdma_disable_rxirq(struct altera_tse_private *); +void msgdma_disable_txirq(struct altera_tse_private *); +void msgdma_clear_rxirq(struct altera_tse_private *); +void msgdma_clear_txirq(struct altera_tse_private *); +u32 msgdma_tx_completions(struct altera_tse_private *); +void msgdma_add_rx_desc(struct altera_tse_private *, struct tse_buffer *); +int msgdma_tx_buffer(struct altera_tse_private *, struct tse_buffer *); +u32 msgdma_rx_status(struct altera_tse_private *); +int msgdma_initialize(struct altera_tse_private *); +void msgdma_uninitialize(struct altera_tse_private *); +void msgdma_start_rxdma(struct altera_tse_private *); + +#endif /*  __ALTERA_MSGDMA_H__ */ diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h new file mode 100644 index 00000000000..e335626e1b6 --- /dev/null +++ b/drivers/net/ethernet/altera/altera_msgdmahw.h @@ -0,0 +1,162 @@ +/* Altera TSE SGDMA and MSGDMA Linux driver + * Copyright (C) 2014 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program.  If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef __ALTERA_MSGDMAHW_H__ +#define __ALTERA_MSGDMAHW_H__ + +/* mSGDMA extended descriptor format + */ +struct msgdma_extended_desc { +	u32 read_addr_lo;	/* data buffer source address low bits */ +	u32 write_addr_lo;	/* data buffer destination address low bits */ +	u32 len;		/* the number of bytes to transfer +				 * per descriptor +				 */ +	u32 burst_seq_num;	/* bit 31:24 write burst +				 * bit 23:16 read burst +				 * bit 15:0  sequence number +				 */ +	u32 stride;		/* bit 31:16 write stride +				 * bit 15:0  read stride +				 */ +	u32 read_addr_hi;	/* data buffer source address high bits */ +	u32 write_addr_hi;	/* data buffer destination address high bits */ +	u32 control;		/* characteristics of the transfer */ +}; + +/* mSGDMA descriptor control field bit definitions + */ +#define MSGDMA_DESC_CTL_SET_CH(x)	((x) & 0xff) +#define MSGDMA_DESC_CTL_GEN_SOP		BIT(8) +#define MSGDMA_DESC_CTL_GEN_EOP		BIT(9) +#define MSGDMA_DESC_CTL_PARK_READS	BIT(10) +#define MSGDMA_DESC_CTL_PARK_WRITES	BIT(11) +#define MSGDMA_DESC_CTL_END_ON_EOP	BIT(12) +#define MSGDMA_DESC_CTL_END_ON_LEN	BIT(13) +#define MSGDMA_DESC_CTL_TR_COMP_IRQ	BIT(14) +#define MSGDMA_DESC_CTL_EARLY_IRQ	BIT(15) +#define MSGDMA_DESC_CTL_TR_ERR_IRQ	(0xff << 16) +#define MSGDMA_DESC_CTL_EARLY_DONE	BIT(24) +/* Writing ‘1’ to the ‘go’ bit commits the entire descriptor into the + * descriptor FIFO(s) + */ +#define MSGDMA_DESC_CTL_GO		BIT(31) + +/* Tx buffer control flags + */ +#define MSGDMA_DESC_CTL_TX_FIRST	(MSGDMA_DESC_CTL_GEN_SOP |	\ +					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\ +					 MSGDMA_DESC_CTL_GO) + +#define MSGDMA_DESC_CTL_TX_MIDDLE	(MSGDMA_DESC_CTL_TR_ERR_IRQ |	\ +					 MSGDMA_DESC_CTL_GO) + +#define MSGDMA_DESC_CTL_TX_LAST		(MSGDMA_DESC_CTL_GEN_EOP |	\ +					 MSGDMA_DESC_CTL_TR_COMP_IRQ |	\ +					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\ +					 MSGDMA_DESC_CTL_GO) + +#define MSGDMA_DESC_CTL_TX_SINGLE	(MSGDMA_DESC_CTL_GEN_SOP |	\ +					 MSGDMA_DESC_CTL_GEN_EOP |	\ +					 MSGDMA_DESC_CTL_TR_COMP_IRQ |	\ +					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\ +					 MSGDMA_DESC_CTL_GO) + +#define MSGDMA_DESC_CTL_RX_SINGLE	(MSGDMA_DESC_CTL_END_ON_EOP |	\ +					 MSGDMA_DESC_CTL_END_ON_LEN |	\ +					 MSGDMA_DESC_CTL_TR_COMP_IRQ |	\ +					 MSGDMA_DESC_CTL_EARLY_IRQ |	\ +					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\ +					 MSGDMA_DESC_CTL_GO) + +/* mSGDMA extended descriptor stride definitions + */ +#define MSGDMA_DESC_TX_STRIDE		(0x00010001) +#define MSGDMA_DESC_RX_STRIDE		(0x00010001) + +/* mSGDMA dispatcher control and status register map + */ +struct msgdma_csr { +	u32 status;		/* Read/Clear */ +	u32 control;		/* Read/Write */ +	u32 rw_fill_level;	/* bit 31:16 - write fill level +				 * bit 15:0  - read fill level +				 */ +	u32 resp_fill_level;	/* bit 15:0 */ +	u32 rw_seq_num;		/* bit 31:16 - write sequence number +				 * bit 15:0  - read sequence number +				 */ +	u32 pad[3];		/* reserved */ +}; + +/* mSGDMA CSR status register bit definitions + */ +#define MSGDMA_CSR_STAT_BUSY			BIT(0) +#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY		BIT(1) +#define MSGDMA_CSR_STAT_DESC_BUF_FULL		BIT(2) +#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY		BIT(3) +#define MSGDMA_CSR_STAT_RESP_BUF_FULL		BIT(4) +#define MSGDMA_CSR_STAT_STOPPED			BIT(5) +#define MSGDMA_CSR_STAT_RESETTING		BIT(6) +#define MSGDMA_CSR_STAT_STOPPED_ON_ERR		BIT(7) +#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY	BIT(8) +#define MSGDMA_CSR_STAT_IRQ			BIT(9) +#define MSGDMA_CSR_STAT_MASK			0x3FF +#define MSGDMA_CSR_STAT_MASK_WITHOUT_IRQ	0x1FF + +#define MSGDMA_CSR_STAT_BUSY_GET(v)			GET_BIT_VALUE(v, 0) +#define 
MSGDMA_CSR_STAT_DESC_BUF_EMPTY_GET(v)		GET_BIT_VALUE(v, 1) +#define MSGDMA_CSR_STAT_DESC_BUF_FULL_GET(v)		GET_BIT_VALUE(v, 2) +#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY_GET(v)		GET_BIT_VALUE(v, 3) +#define MSGDMA_CSR_STAT_RESP_BUF_FULL_GET(v)		GET_BIT_VALUE(v, 4) +#define MSGDMA_CSR_STAT_STOPPED_GET(v)			GET_BIT_VALUE(v, 5) +#define MSGDMA_CSR_STAT_RESETTING_GET(v)		GET_BIT_VALUE(v, 6) +#define MSGDMA_CSR_STAT_STOPPED_ON_ERR_GET(v)		GET_BIT_VALUE(v, 7) +#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY_GET(v)		GET_BIT_VALUE(v, 8) +#define MSGDMA_CSR_STAT_IRQ_GET(v)			GET_BIT_VALUE(v, 9) + +/* mSGDMA CSR control register bit definitions + */ +#define MSGDMA_CSR_CTL_STOP			BIT(0) +#define MSGDMA_CSR_CTL_RESET			BIT(1) +#define MSGDMA_CSR_CTL_STOP_ON_ERR		BIT(2) +#define MSGDMA_CSR_CTL_STOP_ON_EARLY		BIT(3) +#define MSGDMA_CSR_CTL_GLOBAL_INTR		BIT(4) +#define MSGDMA_CSR_CTL_STOP_DESCS		BIT(5) + +/* mSGDMA CSR fill level bits + */ +#define MSGDMA_CSR_WR_FILL_LEVEL_GET(v)		(((v) & 0xffff0000) >> 16) +#define MSGDMA_CSR_RD_FILL_LEVEL_GET(v)		((v) & 0x0000ffff) +#define MSGDMA_CSR_RESP_FILL_LEVEL_GET(v)	((v) & 0x0000ffff) + +/* mSGDMA response register map + */ +struct msgdma_response { +	u32 bytes_transferred; +	u32 status; +}; + +#define msgdma_respoffs(a) (offsetof(struct msgdma_response, a)) +#define msgdma_csroffs(a) (offsetof(struct msgdma_csr, a)) +#define msgdma_descroffs(a) (offsetof(struct msgdma_extended_desc, a)) + +/* mSGDMA response register bit definitions + */ +#define MSGDMA_RESP_EARLY_TERM	BIT(8) +#define MSGDMA_RESP_ERR_MASK	0xFF + +#endif /* __ALTERA_MSGDMA_H__*/ diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c new file mode 100644 index 00000000000..580553d42d3 --- /dev/null +++ b/drivers/net/ethernet/altera/altera_sgdma.c @@ -0,0 +1,540 @@ +/* Altera TSE SGDMA and MSGDMA Linux driver + * Copyright (C) 2014 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program.  If not, see <http://www.gnu.org/licenses/>. 
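The fill-level helper macros above can stand in for the open-coded "& 0xffff" mask used in msgdma_tx_completions(). A minimal sketch, with a hypothetical function name, of reading the Tx dispatcher fill level through them:

static u32 example_tx_read_fill_level(struct altera_tse_private *priv)
{
	u32 fill = csrrd32(priv->tx_dma_csr, msgdma_csroffs(rw_fill_level));

	/* lower 16 bits: read-side (Tx data fetch) fill level,
	 * upper 16 bits: write-side fill level
	 */
	return MSGDMA_CSR_RD_FILL_LEVEL_GET(fill);
}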
+ */ + +#include <linux/list.h> +#include "altera_utils.h" +#include "altera_tse.h" +#include "altera_sgdmahw.h" +#include "altera_sgdma.h" + +static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc, +				struct sgdma_descrip __iomem *ndesc, +				dma_addr_t ndesc_phys, +				dma_addr_t raddr, +				dma_addr_t waddr, +				u16 length, +				int generate_eop, +				int rfixed, +				int wfixed); + +static int sgdma_async_write(struct altera_tse_private *priv, +			      struct sgdma_descrip __iomem *desc); + +static int sgdma_async_read(struct altera_tse_private *priv); + +static dma_addr_t +sgdma_txphysaddr(struct altera_tse_private *priv, +		 struct sgdma_descrip __iomem *desc); + +static dma_addr_t +sgdma_rxphysaddr(struct altera_tse_private *priv, +		 struct sgdma_descrip __iomem *desc); + +static int sgdma_txbusy(struct altera_tse_private *priv); + +static int sgdma_rxbusy(struct altera_tse_private *priv); + +static void +queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer); + +static void +queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer); + +static struct tse_buffer * +dequeue_tx(struct altera_tse_private *priv); + +static struct tse_buffer * +dequeue_rx(struct altera_tse_private *priv); + +static struct tse_buffer * +queue_rx_peekhead(struct altera_tse_private *priv); + +int sgdma_initialize(struct altera_tse_private *priv) +{ +	priv->txctrlreg = SGDMA_CTRLREG_ILASTD | +		      SGDMA_CTRLREG_INTEN; + +	priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP | +		      SGDMA_CTRLREG_INTEN | +		      SGDMA_CTRLREG_ILASTD; + +	priv->sgdmadesclen = sizeof(struct sgdma_descrip); + +	INIT_LIST_HEAD(&priv->txlisthd); +	INIT_LIST_HEAD(&priv->rxlisthd); + +	priv->rxdescphys = (dma_addr_t) 0; +	priv->txdescphys = (dma_addr_t) 0; + +	priv->rxdescphys = dma_map_single(priv->device, +					  (void __force *)priv->rx_dma_desc, +					  priv->rxdescmem, DMA_BIDIRECTIONAL); + +	if (dma_mapping_error(priv->device, priv->rxdescphys)) { +		sgdma_uninitialize(priv); +		netdev_err(priv->dev, "error mapping rx descriptor memory\n"); +		return -EINVAL; +	} + +	priv->txdescphys = dma_map_single(priv->device, +					  (void __force *)priv->tx_dma_desc, +					  priv->txdescmem, DMA_TO_DEVICE); + +	if (dma_mapping_error(priv->device, priv->txdescphys)) { +		sgdma_uninitialize(priv); +		netdev_err(priv->dev, "error mapping tx descriptor memory\n"); +		return -EINVAL; +	} + +	/* Initialize descriptor memory to all 0's, sync memory to cache */ +	memset_io(priv->tx_dma_desc, 0, priv->txdescmem); +	memset_io(priv->rx_dma_desc, 0, priv->rxdescmem); + +	dma_sync_single_for_device(priv->device, priv->txdescphys, +				   priv->txdescmem, DMA_TO_DEVICE); + +	dma_sync_single_for_device(priv->device, priv->rxdescphys, +				   priv->rxdescmem, DMA_TO_DEVICE); + +	return 0; +} + +void sgdma_uninitialize(struct altera_tse_private *priv) +{ +	if (priv->rxdescphys) +		dma_unmap_single(priv->device, priv->rxdescphys, +				 priv->rxdescmem, DMA_BIDIRECTIONAL); + +	if (priv->txdescphys) +		dma_unmap_single(priv->device, priv->txdescphys, +				 priv->txdescmem, DMA_TO_DEVICE); +} + +/* This function resets the SGDMA controller and clears the + * descriptor memory used for transmits and receives. 
+ */ +void sgdma_reset(struct altera_tse_private *priv) +{ +	/* Initialize descriptor memory to 0 */ +	memset_io(priv->tx_dma_desc, 0, priv->txdescmem); +	memset_io(priv->rx_dma_desc, 0, priv->rxdescmem); + +	csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control)); +	csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control)); + +	csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control)); +	csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control)); +} + +/* For SGDMA, interrupts remain enabled after initially enabling, + * so no need to provide implementations for abstract enable + * and disable + */ + +void sgdma_enable_rxirq(struct altera_tse_private *priv) +{ +} + +void sgdma_enable_txirq(struct altera_tse_private *priv) +{ +} + +void sgdma_disable_rxirq(struct altera_tse_private *priv) +{ +} + +void sgdma_disable_txirq(struct altera_tse_private *priv) +{ +} + +void sgdma_clear_rxirq(struct altera_tse_private *priv) +{ +	tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control), +		    SGDMA_CTRLREG_CLRINT); +} + +void sgdma_clear_txirq(struct altera_tse_private *priv) +{ +	tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control), +		    SGDMA_CTRLREG_CLRINT); +} + +/* transmits buffer through SGDMA. Returns number of buffers + * transmitted, 0 if not possible. + * + * tx_lock is held by the caller + */ +int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) +{ +	struct sgdma_descrip __iomem *descbase = +		(struct sgdma_descrip __iomem *)priv->tx_dma_desc; + +	struct sgdma_descrip __iomem *cdesc = &descbase[0]; +	struct sgdma_descrip __iomem *ndesc = &descbase[1]; + +	/* wait 'til the tx sgdma is ready for the next transmit request */ +	if (sgdma_txbusy(priv)) +		return 0; + +	sgdma_setup_descrip(cdesc,			/* current descriptor */ +			    ndesc,			/* next descriptor */ +			    sgdma_txphysaddr(priv, ndesc), +			    buffer->dma_addr,		/* address of packet to xmit */ +			    0,				/* write addr 0 for tx dma */ +			    buffer->len,		/* length of packet */ +			    SGDMA_CONTROL_EOP,		/* Generate EOP */ +			    0,				/* read fixed */ +			    SGDMA_CONTROL_WR_FIXED);	/* Generate SOP */ + +	sgdma_async_write(priv, cdesc); + +	/* enqueue the request to the pending transmit queue */ +	queue_tx(priv, buffer); + +	return 1; +} + + +/* tx_lock held to protect access to queued tx list + */ +u32 sgdma_tx_completions(struct altera_tse_private *priv) +{ +	u32 ready = 0; + +	if (!sgdma_txbusy(priv) && +	    ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control)) +	     & SGDMA_CONTROL_HW_OWNED) == 0) && +	    (dequeue_tx(priv))) { +		ready = 1; +	} + +	return ready; +} + +void sgdma_start_rxdma(struct altera_tse_private *priv) +{ +	sgdma_async_read(priv); +} + +void sgdma_add_rx_desc(struct altera_tse_private *priv, +		       struct tse_buffer *rxbuffer) +{ +	queue_rx(priv, rxbuffer); +} + +/* status is returned on upper 16 bits, + * length is returned in lower 16 bits + */ +u32 sgdma_rx_status(struct altera_tse_private *priv) +{ +	struct sgdma_descrip __iomem *base = +		(struct sgdma_descrip __iomem *)priv->rx_dma_desc; +	struct sgdma_descrip __iomem *desc = NULL; +	struct tse_buffer *rxbuffer = NULL; +	unsigned int rxstatus = 0; + +	u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status)); + +	desc = &base[0]; +	if (sts & SGDMA_STSREG_EOP) { +		unsigned int pktlength = 0; +		unsigned int pktstatus = 0; +		dma_sync_single_for_cpu(priv->device, +					priv->rxdescphys, +					priv->sgdmadesclen, +					DMA_FROM_DEVICE); + +		pktlength = csrrd16(desc, 
sgdma_descroffs(bytes_xferred)); +		pktstatus = csrrd8(desc, sgdma_descroffs(status)); +		rxstatus = pktstatus & ~SGDMA_STATUS_EOP; +		rxstatus = rxstatus << 16; +		rxstatus |= (pktlength & 0xffff); + +		if (rxstatus) { +			csrwr8(0, desc, sgdma_descroffs(status)); + +			rxbuffer = dequeue_rx(priv); +			if (rxbuffer == NULL) +				netdev_info(priv->dev, +					    "sgdma rx and rx queue empty!\n"); + +			/* Clear control */ +			csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control)); +			/* clear status */ +			csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status)); + +			/* kick the rx sgdma after reaping this descriptor */ +			sgdma_async_read(priv); + +		} else { +			/* If the SGDMA indicated an end of packet on recv, +			 * then it's expected that the rxstatus from the +			 * descriptor is non-zero - meaning a valid packet +			 * with a nonzero length, or an error has been +			 * indicated. if not, then all we can do is signal +			 * an error and return no packet received. Most likely +			 * there is a system design error, or an error in the +			 * underlying kernel (cache or cache management problem) +			 */ +			netdev_err(priv->dev, +				   "SGDMA RX Error Info: %x, %x, %x\n", +				   sts, csrrd8(desc, sgdma_descroffs(status)), +				   rxstatus); +		} +	} else if (sts == 0) { +		sgdma_async_read(priv); +	} + +	return rxstatus; +} + + +/* Private functions */ +static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc, +				struct sgdma_descrip __iomem *ndesc, +				dma_addr_t ndesc_phys, +				dma_addr_t raddr, +				dma_addr_t waddr, +				u16 length, +				int generate_eop, +				int rfixed, +				int wfixed) +{ +	/* Clear the next descriptor as not owned by hardware */ + +	u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control)); +	ctrl &= ~SGDMA_CONTROL_HW_OWNED; +	csrwr8(ctrl, ndesc, sgdma_descroffs(control)); + +	ctrl = SGDMA_CONTROL_HW_OWNED; +	ctrl |= generate_eop; +	ctrl |= rfixed; +	ctrl |= wfixed; + +	/* Channel is implicitly zero, initialized to 0 by default */ +	csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr)); +	csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr)); + +	csrwr32(0, desc, sgdma_descroffs(pad1)); +	csrwr32(0, desc, sgdma_descroffs(pad2)); +	csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next)); + +	csrwr8(ctrl, desc, sgdma_descroffs(control)); +	csrwr8(0, desc, sgdma_descroffs(status)); +	csrwr8(0, desc, sgdma_descroffs(wburst)); +	csrwr8(0, desc, sgdma_descroffs(rburst)); +	csrwr16(length, desc, sgdma_descroffs(bytes)); +	csrwr16(0, desc, sgdma_descroffs(bytes_xferred)); +} + +/* If hardware is busy, don't restart async read. + * if status register is 0 - meaning initial state, restart async read, + * probably for the first time when populating a receive buffer. + * If read status indicate not busy and a status, restart the async + * DMA read. 
+ */ +static int sgdma_async_read(struct altera_tse_private *priv) +{ +	struct sgdma_descrip __iomem *descbase = +		(struct sgdma_descrip __iomem *)priv->rx_dma_desc; + +	struct sgdma_descrip __iomem *cdesc = &descbase[0]; +	struct sgdma_descrip __iomem *ndesc = &descbase[1]; +	struct tse_buffer *rxbuffer = NULL; + +	if (!sgdma_rxbusy(priv)) { +		rxbuffer = queue_rx_peekhead(priv); +		if (rxbuffer == NULL) { +			netdev_err(priv->dev, "no rx buffers available\n"); +			return 0; +		} + +		sgdma_setup_descrip(cdesc,		/* current descriptor */ +				    ndesc,		/* next descriptor */ +				    sgdma_rxphysaddr(priv, ndesc), +				    0,			/* read addr 0 for rx dma */ +				    rxbuffer->dma_addr, /* write addr for rx dma */ +				    0,			/* read 'til EOP */ +				    0,			/* EOP: NA for rx dma */ +				    0,			/* read fixed: NA for rx dma */ +				    0);			/* SOP: NA for rx DMA */ + +		dma_sync_single_for_device(priv->device, +					   priv->rxdescphys, +					   priv->sgdmadesclen, +					   DMA_TO_DEVICE); + +		csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)), +			priv->rx_dma_csr, +			sgdma_csroffs(next_descrip)); + +		csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START), +			priv->rx_dma_csr, +			sgdma_csroffs(control)); + +		return 1; +	} + +	return 0; +} + +static int sgdma_async_write(struct altera_tse_private *priv, +			     struct sgdma_descrip __iomem *desc) +{ +	if (sgdma_txbusy(priv)) +		return 0; + +	/* clear control and status */ +	csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control)); +	csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status)); + +	dma_sync_single_for_device(priv->device, priv->txdescphys, +				   priv->sgdmadesclen, DMA_TO_DEVICE); + +	csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)), +		priv->tx_dma_csr, +		sgdma_csroffs(next_descrip)); + +	csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START), +		priv->tx_dma_csr, +		sgdma_csroffs(control)); + +	return 1; +} + +static dma_addr_t +sgdma_txphysaddr(struct altera_tse_private *priv, +		 struct sgdma_descrip __iomem *desc) +{ +	dma_addr_t paddr = priv->txdescmem_busaddr; +	uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc; +	return (dma_addr_t)((uintptr_t)paddr + offs); +} + +static dma_addr_t +sgdma_rxphysaddr(struct altera_tse_private *priv, +		 struct sgdma_descrip __iomem *desc) +{ +	dma_addr_t paddr = priv->rxdescmem_busaddr; +	uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc; +	return (dma_addr_t)((uintptr_t)paddr + offs); +} + +#define list_remove_head(list, entry, type, member)			\ +	do {								\ +		entry = NULL;						\ +		if (!list_empty(list)) {				\ +			entry = list_entry((list)->next, type, member);	\ +			list_del_init(&entry->member);			\ +		}							\ +	} while (0) + +#define list_peek_head(list, entry, type, member)			\ +	do {								\ +		entry = NULL;						\ +		if (!list_empty(list)) {				\ +			entry = list_entry((list)->next, type, member);	\ +		}							\ +	} while (0) + +/* adds a tse_buffer to the tail of a tx buffer list. + * assumes the caller is managing and holding a mutual exclusion + * primitive to avoid simultaneous pushes/pops to the list. + */ +static void +queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer) +{ +	list_add_tail(&buffer->lh, &priv->txlisthd); +} + + +/* adds a tse_buffer to the tail of a rx buffer list + * assumes the caller is managing and holding a mutual exclusion + * primitive to avoid simultaneous pushes/pops to the list. 
+ */ +static void +queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer) +{ +	list_add_tail(&buffer->lh, &priv->rxlisthd); +} + +/* dequeues a tse_buffer from the transmit buffer list, otherwise + * returns NULL if empty. + * assumes the caller is managing and holding a mutual exclusion + * primitive to avoid simultaneous pushes/pops to the list. + */ +static struct tse_buffer * +dequeue_tx(struct altera_tse_private *priv) +{ +	struct tse_buffer *buffer = NULL; +	list_remove_head(&priv->txlisthd, buffer, struct tse_buffer, lh); +	return buffer; +} + +/* dequeues a tse_buffer from the receive buffer list, otherwise + * returns NULL if empty + * assumes the caller is managing and holding a mutual exclusion + * primitive to avoid simultaneous pushes/pops to the list. + */ +static struct tse_buffer * +dequeue_rx(struct altera_tse_private *priv) +{ +	struct tse_buffer *buffer = NULL; +	list_remove_head(&priv->rxlisthd, buffer, struct tse_buffer, lh); +	return buffer; +} + +/* dequeues a tse_buffer from the receive buffer list, otherwise + * returns NULL if empty + * assumes the caller is managing and holding a mutual exclusion + * primitive to avoid simultaneous pushes/pops to the list while the + * head is being examined. + */ +static struct tse_buffer * +queue_rx_peekhead(struct altera_tse_private *priv) +{ +	struct tse_buffer *buffer = NULL; +	list_peek_head(&priv->rxlisthd, buffer, struct tse_buffer, lh); +	return buffer; +} + +/* check and return rx sgdma status without polling + */ +static int sgdma_rxbusy(struct altera_tse_private *priv) +{ +	return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status)) +		       & SGDMA_STSREG_BUSY; +} + +/* waits for the tx sgdma to finish it's current operation, returns 0 + * when it transitions to nonbusy, returns 1 if the operation times out + */ +static int sgdma_txbusy(struct altera_tse_private *priv) +{ +	int delay = 0; + +	/* if DMA is busy, wait for current transactino to finish */ +	while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status)) +		& SGDMA_STSREG_BUSY) && (delay++ < 100)) +		udelay(1); + +	if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status)) +	    & SGDMA_STSREG_BUSY) { +		netdev_err(priv->dev, "timeout waiting for tx dma\n"); +		return 1; +	} +	return 0; +} diff --git a/drivers/net/ethernet/altera/altera_sgdma.h b/drivers/net/ethernet/altera/altera_sgdma.h new file mode 100644 index 00000000000..584977e29ef --- /dev/null +++ b/drivers/net/ethernet/altera/altera_sgdma.h @@ -0,0 +1,36 @@ +/* Altera TSE SGDMA and MSGDMA Linux driver + * Copyright (C) 2014 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program.  If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef __ALTERA_SGDMA_H__ +#define __ALTERA_SGDMA_H__ + +void sgdma_reset(struct altera_tse_private *); +void sgdma_enable_txirq(struct altera_tse_private *); +void sgdma_enable_rxirq(struct altera_tse_private *); +void sgdma_disable_rxirq(struct altera_tse_private *); +void sgdma_disable_txirq(struct altera_tse_private *); +void sgdma_clear_rxirq(struct altera_tse_private *); +void sgdma_clear_txirq(struct altera_tse_private *); +int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *); +u32 sgdma_tx_completions(struct altera_tse_private *); +void sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *); +void sgdma_status(struct altera_tse_private *); +u32 sgdma_rx_status(struct altera_tse_private *); +int sgdma_initialize(struct altera_tse_private *); +void sgdma_uninitialize(struct altera_tse_private *); +void sgdma_start_rxdma(struct altera_tse_private *); + +#endif /*  __ALTERA_SGDMA_H__ */ diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h new file mode 100644 index 00000000000..85bc33b218d --- /dev/null +++ b/drivers/net/ethernet/altera/altera_sgdmahw.h @@ -0,0 +1,126 @@ +/* Altera TSE SGDMA and MSGDMA Linux driver + * Copyright (C) 2014 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program.  If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef __ALTERA_SGDMAHW_H__ +#define __ALTERA_SGDMAHW_H__ + +/* SGDMA descriptor structure */ +struct sgdma_descrip { +	u32	raddr; /* address of data to be read */ +	u32	pad1; +	u32	waddr; +	u32	pad2; +	u32	next; +	u32	pad3; +	u16	bytes; +	u8	rburst; +	u8	wburst; +	u16	bytes_xferred;	/* 16 bits, bytes xferred */ + +	/* bit 0: error +	 * bit 1: length error +	 * bit 2: crc error +	 * bit 3: truncated error +	 * bit 4: phy error +	 * bit 5: collision error +	 * bit 6: reserved +	 * bit 7: status eop for recv case +	 */ +	u8	status; + +	/* bit 0: eop +	 * bit 1: read_fixed +	 * bit 2: write fixed +	 * bits 3,4,5,6: Channel (always 0) +	 * bit 7: hardware owned +	 */ +	u8	control; +} __packed; + + +#define SGDMA_STATUS_ERR		BIT(0) +#define SGDMA_STATUS_LENGTH_ERR		BIT(1) +#define SGDMA_STATUS_CRC_ERR		BIT(2) +#define SGDMA_STATUS_TRUNC_ERR		BIT(3) +#define SGDMA_STATUS_PHY_ERR		BIT(4) +#define SGDMA_STATUS_COLL_ERR		BIT(5) +#define SGDMA_STATUS_EOP		BIT(7) + +#define SGDMA_CONTROL_EOP		BIT(0) +#define SGDMA_CONTROL_RD_FIXED		BIT(1) +#define SGDMA_CONTROL_WR_FIXED		BIT(2) + +/* Channel is always 0, so just zero initialize it */ + +#define SGDMA_CONTROL_HW_OWNED		BIT(7) + +/* SGDMA register space */ +struct sgdma_csr { +	/* bit 0: error +	 * bit 1: eop +	 * bit 2: descriptor completed +	 * bit 3: chain completed +	 * bit 4: busy +	 * remainder reserved +	 */ +	u32	status; +	u32	pad1[3]; + +	/* bit 0: interrupt on error +	 * bit 1: interrupt on eop +	 * bit 2: interrupt after every descriptor +	 * bit 3: interrupt after last descrip in a chain +	 * bit 4: global interrupt enable +	 * bit 5: starts descriptor processing +	 * bit 6: stop core on dma error +	 * bit 7: interrupt on max descriptors +	 * bits 8-15: max descriptors to generate interrupt +	 * bit 16: Software reset +	 * bit 17: clears owned by hardware if 0, does not clear otherwise +	 * bit 18: enables descriptor polling mode +	 * bit 19-26: clocks before polling again +	 * bit 27-30: reserved +	 * bit 31: clear interrupt +	 */ +	u32	control; +	u32	pad2[3]; +	u32	next_descrip; +	u32	pad3[3]; +}; + +#define sgdma_csroffs(a) (offsetof(struct sgdma_csr, a)) +#define sgdma_descroffs(a) (offsetof(struct sgdma_descrip, a)) + +#define SGDMA_STSREG_ERR	BIT(0) /* Error */ +#define SGDMA_STSREG_EOP	BIT(1) /* EOP */ +#define SGDMA_STSREG_DESCRIP	BIT(2) /* Descriptor completed */ +#define SGDMA_STSREG_CHAIN	BIT(3) /* Chain completed */ +#define SGDMA_STSREG_BUSY	BIT(4) /* Controller busy */ + +#define SGDMA_CTRLREG_IOE	BIT(0) /* Interrupt on error */ +#define SGDMA_CTRLREG_IOEOP	BIT(1) /* Interrupt on EOP */ +#define SGDMA_CTRLREG_IDESCRIP	BIT(2) /* Interrupt after every descriptor */ +#define SGDMA_CTRLREG_ILASTD	BIT(3) /* Interrupt after last descriptor */ +#define SGDMA_CTRLREG_INTEN	BIT(4) /* Global Interrupt enable */ +#define SGDMA_CTRLREG_START	BIT(5) /* starts descriptor processing */ +#define SGDMA_CTRLREG_STOPERR	BIT(6) /* stop on dma error */ +#define SGDMA_CTRLREG_INTMAX	BIT(7) /* Interrupt on max descriptors */ +#define SGDMA_CTRLREG_RESET	BIT(16)/* Software reset */ +#define SGDMA_CTRLREG_COBHW	BIT(17)/* Clears owned by hardware */ +#define SGDMA_CTRLREG_POLL	BIT(18)/* enables descriptor polling mode */ +#define SGDMA_CTRLREG_CLRINT	BIT(31)/* Clears interrupt */ + +#endif /* __ALTERA_SGDMAHW_H__ */ diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h new file mode 100644 index 00000000000..2adb24d4523 --- /dev/null +++ b/drivers/net/ethernet/altera/altera_tse.h @@ -0,0 +1,537 @@ +/* 
Altera Triple-Speed Ethernet MAC driver + * Copyright (C) 2008-2014 Altera Corporation. All rights reserved + * + * Contributors: + *   Dalon Westergreen + *   Thomas Chou + *   Ian Abbott + *   Yuriy Kozlov + *   Tobias Klauser + *   Andriy Smolskyy + *   Roman Bulgakov + *   Dmytro Mytarchuk + *   Matthew Gerlach + * + * Original driver contributed by SLS. + * Major updates contributed by GlobalLogic + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program.  If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ALTERA_TSE_H__ +#define __ALTERA_TSE_H__ + +#define ALTERA_TSE_RESOURCE_NAME	"altera_tse" + +#include <linux/bitops.h> +#include <linux/if_vlan.h> +#include <linux/list.h> +#include <linux/netdevice.h> +#include <linux/phy.h> + +#define ALTERA_TSE_SW_RESET_WATCHDOG_CNTR	10000 +#define ALTERA_TSE_MAC_FIFO_WIDTH		4	/* TX/RX FIFO width in +							 * bytes +							 */ +/* Rx FIFO default settings */ +#define ALTERA_TSE_RX_SECTION_EMPTY	16 +#define ALTERA_TSE_RX_SECTION_FULL	0 +#define ALTERA_TSE_RX_ALMOST_EMPTY	8 +#define ALTERA_TSE_RX_ALMOST_FULL	8 + +/* Tx FIFO default settings */ +#define ALTERA_TSE_TX_SECTION_EMPTY	16 +#define ALTERA_TSE_TX_SECTION_FULL	0 +#define ALTERA_TSE_TX_ALMOST_EMPTY	8 +#define ALTERA_TSE_TX_ALMOST_FULL	3 + +/* MAC function configuration default settings */ +#define ALTERA_TSE_TX_IPG_LENGTH	12 + +#define ALTERA_TSE_PAUSE_QUANTA		0xffff + +#define GET_BIT_VALUE(v, bit)		(((v) >> (bit)) & 0x1) + +/* MAC Command_Config Register Bit Definitions + */ +#define MAC_CMDCFG_TX_ENA			BIT(0) +#define MAC_CMDCFG_RX_ENA			BIT(1) +#define MAC_CMDCFG_XON_GEN			BIT(2) +#define MAC_CMDCFG_ETH_SPEED			BIT(3) +#define MAC_CMDCFG_PROMIS_EN			BIT(4) +#define MAC_CMDCFG_PAD_EN			BIT(5) +#define MAC_CMDCFG_CRC_FWD			BIT(6) +#define MAC_CMDCFG_PAUSE_FWD			BIT(7) +#define MAC_CMDCFG_PAUSE_IGNORE			BIT(8) +#define MAC_CMDCFG_TX_ADDR_INS			BIT(9) +#define MAC_CMDCFG_HD_ENA			BIT(10) +#define MAC_CMDCFG_EXCESS_COL			BIT(11) +#define MAC_CMDCFG_LATE_COL			BIT(12) +#define MAC_CMDCFG_SW_RESET			BIT(13) +#define MAC_CMDCFG_MHASH_SEL			BIT(14) +#define MAC_CMDCFG_LOOP_ENA			BIT(15) +#define MAC_CMDCFG_TX_ADDR_SEL(v)		(((v) & 0x7) << 16) +#define MAC_CMDCFG_MAGIC_ENA			BIT(19) +#define MAC_CMDCFG_SLEEP			BIT(20) +#define MAC_CMDCFG_WAKEUP			BIT(21) +#define MAC_CMDCFG_XOFF_GEN			BIT(22) +#define MAC_CMDCFG_CNTL_FRM_ENA			BIT(23) +#define MAC_CMDCFG_NO_LGTH_CHECK		BIT(24) +#define MAC_CMDCFG_ENA_10			BIT(25) +#define MAC_CMDCFG_RX_ERR_DISC			BIT(26) +#define MAC_CMDCFG_DISABLE_READ_TIMEOUT		BIT(27) +#define MAC_CMDCFG_CNT_RESET			BIT(31) + +#define MAC_CMDCFG_TX_ENA_GET(v)		GET_BIT_VALUE(v, 0) +#define MAC_CMDCFG_RX_ENA_GET(v)		GET_BIT_VALUE(v, 1) +#define MAC_CMDCFG_XON_GEN_GET(v)		GET_BIT_VALUE(v, 2) +#define MAC_CMDCFG_ETH_SPEED_GET(v)		GET_BIT_VALUE(v, 3) +#define MAC_CMDCFG_PROMIS_EN_GET(v)		GET_BIT_VALUE(v, 4) +#define MAC_CMDCFG_PAD_EN_GET(v)		GET_BIT_VALUE(v, 5) +#define MAC_CMDCFG_CRC_FWD_GET(v)		GET_BIT_VALUE(v, 6) +#define MAC_CMDCFG_PAUSE_FWD_GET(v)		GET_BIT_VALUE(v, 7) 
+#define MAC_CMDCFG_PAUSE_IGNORE_GET(v)		GET_BIT_VALUE(v, 8) +#define MAC_CMDCFG_TX_ADDR_INS_GET(v)		GET_BIT_VALUE(v, 9) +#define MAC_CMDCFG_HD_ENA_GET(v)		GET_BIT_VALUE(v, 10) +#define MAC_CMDCFG_EXCESS_COL_GET(v)		GET_BIT_VALUE(v, 11) +#define MAC_CMDCFG_LATE_COL_GET(v)		GET_BIT_VALUE(v, 12) +#define MAC_CMDCFG_SW_RESET_GET(v)		GET_BIT_VALUE(v, 13) +#define MAC_CMDCFG_MHASH_SEL_GET(v)		GET_BIT_VALUE(v, 14) +#define MAC_CMDCFG_LOOP_ENA_GET(v)		GET_BIT_VALUE(v, 15) +#define MAC_CMDCFG_TX_ADDR_SEL_GET(v)		(((v) >> 16) & 0x7) +#define MAC_CMDCFG_MAGIC_ENA_GET(v)		GET_BIT_VALUE(v, 19) +#define MAC_CMDCFG_SLEEP_GET(v)			GET_BIT_VALUE(v, 20) +#define MAC_CMDCFG_WAKEUP_GET(v)		GET_BIT_VALUE(v, 21) +#define MAC_CMDCFG_XOFF_GEN_GET(v)		GET_BIT_VALUE(v, 22) +#define MAC_CMDCFG_CNTL_FRM_ENA_GET(v)		GET_BIT_VALUE(v, 23) +#define MAC_CMDCFG_NO_LGTH_CHECK_GET(v)		GET_BIT_VALUE(v, 24) +#define MAC_CMDCFG_ENA_10_GET(v)		GET_BIT_VALUE(v, 25) +#define MAC_CMDCFG_RX_ERR_DISC_GET(v)		GET_BIT_VALUE(v, 26) +#define MAC_CMDCFG_DISABLE_READ_TIMEOUT_GET(v)	GET_BIT_VALUE(v, 27) +#define MAC_CMDCFG_CNT_RESET_GET(v)		GET_BIT_VALUE(v, 31) + +/* MDIO registers within MAC register Space + */ +struct altera_tse_mdio { +	u32 control;	/* PHY device operation control register */ +	u32 status;	/* PHY device operation status register */ +	u32 phy_id1;	/* Bits 31:16 of PHY identifier */ +	u32 phy_id2;	/* Bits 15:0 of PHY identifier */ +	u32 auto_negotiation_advertisement;	/* Auto-negotiation +							 * advertisement +							 * register +							 */ +	u32 remote_partner_base_page_ability; + +	u32 reg6; +	u32 reg7; +	u32 reg8; +	u32 reg9; +	u32 rega; +	u32 regb; +	u32 regc; +	u32 regd; +	u32 rege; +	u32 regf; +	u32 reg10; +	u32 reg11; +	u32 reg12; +	u32 reg13; +	u32 reg14; +	u32 reg15; +	u32 reg16; +	u32 reg17; +	u32 reg18; +	u32 reg19; +	u32 reg1a; +	u32 reg1b; +	u32 reg1c; +	u32 reg1d; +	u32 reg1e; +	u32 reg1f; +}; + +/* MAC register Space. Note that some of these registers may or may not be + * present depending upon options chosen by the user when the core was + * configured and built. Please consult the Altera Triple Speed Ethernet User + * Guide for details. + */ +struct altera_tse_mac { +	/* Bits 15:0: MegaCore function revision (0x0800). Bit 31:16: Customer +	 * specific revision +	 */ +	u32 megacore_revision; +	/* Provides a memory location for user applications to test the device +	 * memory operation. +	 */ +	u32 scratch_pad; +	/* The host processor uses this register to control and configure the +	 * MAC block +	 */ +	u32 command_config; +	/* 32-bit primary MAC address word 0 bits 0 to 31 of the primary +	 * MAC address +	 */ +	u32 mac_addr_0; +	/* 32-bit primary MAC address word 1 bits 32 to 47 of the primary +	 * MAC address +	 */ +	u32 mac_addr_1; +	/* 14-bit maximum frame length. 
The MAC receive logic */ +	u32 frm_length; +	/* The pause quanta is used in each pause frame sent to a remote +	 * Ethernet device, in increments of 512 Ethernet bit times +	 */ +	u32 pause_quanta; +	/* 12-bit receive FIFO section-empty threshold */ +	u32 rx_section_empty; +	/* 12-bit receive FIFO section-full threshold */ +	u32 rx_section_full; +	/* 12-bit transmit FIFO section-empty threshold */ +	u32 tx_section_empty; +	/* 12-bit transmit FIFO section-full threshold */ +	u32 tx_section_full; +	/* 12-bit receive FIFO almost-empty threshold */ +	u32 rx_almost_empty; +	/* 12-bit receive FIFO almost-full threshold */ +	u32 rx_almost_full; +	/* 12-bit transmit FIFO almost-empty threshold */ +	u32 tx_almost_empty; +	/* 12-bit transmit FIFO almost-full threshold */ +	u32 tx_almost_full; +	/* MDIO address of PHY Device 0. Bits 0 to 4 hold a 5-bit PHY address */ +	u32 mdio_phy0_addr; +	/* MDIO address of PHY Device 1. Bits 0 to 4 hold a 5-bit PHY address */ +	u32 mdio_phy1_addr; + +	/* Bit[15:0]—16-bit holdoff quanta */ +	u32 holdoff_quant; + +	/* only if 100/1000 BaseX PCS, reserved otherwise */ +	u32 reserved1[5]; + +	/* Minimum IPG between consecutive transmit frame in terms of bytes */ +	u32 tx_ipg_length; + +	/* IEEE 802.3 oEntity Managed Object Support */ + +	/* The MAC addresses */ +	u32 mac_id_1; +	u32 mac_id_2; + +	/* Number of frames transmitted without error including pause frames */ +	u32 frames_transmitted_ok; +	/* Number of frames received without error including pause frames */ +	u32 frames_received_ok; +	/* Number of frames received with a CRC error */ +	u32 frames_check_sequence_errors; +	/* Frame received with an alignment error */ +	u32 alignment_errors; +	/* Sum of payload and padding octets of frames transmitted without +	 * error +	 */ +	u32 octets_transmitted_ok; +	/* Sum of payload and padding octets of frames received without error */ +	u32 octets_received_ok; + +	/* IEEE 802.3 oPausedEntity Managed Object Support */ + +	/* Number of transmitted pause frames */ +	u32 tx_pause_mac_ctrl_frames; +	/* Number of Received pause frames */ +	u32 rx_pause_mac_ctrl_frames; + +	/* IETF MIB (MIB-II) Object Support */ + +	/* Number of frames received with error */ +	u32 if_in_errors; +	/* Number of frames transmitted with error */ +	u32 if_out_errors; +	/* Number of valid received unicast frames */ +	u32 if_in_ucast_pkts; +	/* Number of valid received multicasts frames (without pause) */ +	u32 if_in_multicast_pkts; +	/* Number of valid received broadcast frames */ +	u32 if_in_broadcast_pkts; +	u32 if_out_discards; +	/* The number of valid unicast frames transmitted */ +	u32 if_out_ucast_pkts; +	/* The number of valid multicast frames transmitted, +	 * excluding pause frames +	 */ +	u32 if_out_multicast_pkts; +	u32 if_out_broadcast_pkts; + +	/* IETF RMON MIB Object Support */ + +	/* Counts the number of dropped packets due to internal errors +	 * of the MAC client. +	 */ +	u32 ether_stats_drop_events; +	/* Total number of bytes received. Good and bad frames. */ +	u32 ether_stats_octets; +	/* Total number of packets received. Counts good and bad packets. */ +	u32 ether_stats_pkts; +	/* Number of packets received with less than 64 bytes. 
*/ +	u32 ether_stats_undersize_pkts; +	/* The number of frames received that are longer than the +	 * value configured in the frm_length register +	 */ +	u32 ether_stats_oversize_pkts; +	/* Number of received packet with 64 bytes */ +	u32 ether_stats_pkts_64_octets; +	/* Frames (good and bad) with 65 to 127 bytes */ +	u32 ether_stats_pkts_65to127_octets; +	/* Frames (good and bad) with 128 to 255 bytes */ +	u32 ether_stats_pkts_128to255_octets; +	/* Frames (good and bad) with 256 to 511 bytes */ +	u32 ether_stats_pkts_256to511_octets; +	/* Frames (good and bad) with 512 to 1023 bytes */ +	u32 ether_stats_pkts_512to1023_octets; +	/* Frames (good and bad) with 1024 to 1518 bytes */ +	u32 ether_stats_pkts_1024to1518_octets; + +	/* Any frame length from 1519 to the maximum length configured in the +	 * frm_length register, if it is greater than 1518 +	 */ +	u32 ether_stats_pkts_1519tox_octets; +	/* Too long frames with CRC error */ +	u32 ether_stats_jabbers; +	/* Too short frames with CRC error */ +	u32 ether_stats_fragments; + +	u32 reserved2; + +	/* FIFO control register */ +	u32 tx_cmd_stat; +	u32 rx_cmd_stat; + +	/* Extended Statistics Counters */ +	u32 msb_octets_transmitted_ok; +	u32 msb_octets_received_ok; +	u32 msb_ether_stats_octets; + +	u32 reserved3; + +	/* Multicast address resolution table, mapped in the controller address +	 * space +	 */ +	u32 hash_table[64]; + +	/* Registers 0 to 31 within PHY device 0/1 connected to the MDIO PHY +	 * management interface +	 */ +	struct altera_tse_mdio mdio_phy0; +	struct altera_tse_mdio mdio_phy1; + +	/* 4 Supplemental MAC Addresses */ +	u32 supp_mac_addr_0_0; +	u32 supp_mac_addr_0_1; +	u32 supp_mac_addr_1_0; +	u32 supp_mac_addr_1_1; +	u32 supp_mac_addr_2_0; +	u32 supp_mac_addr_2_1; +	u32 supp_mac_addr_3_0; +	u32 supp_mac_addr_3_1; + +	u32 reserved4[8]; + +	/* IEEE 1588v2 Feature */ +	u32 tx_period; +	u32 tx_adjust_fns; +	u32 tx_adjust_ns; +	u32 rx_period; +	u32 rx_adjust_fns; +	u32 rx_adjust_ns; + +	u32 reserved5[42]; +}; + +#define tse_csroffs(a) (offsetof(struct altera_tse_mac, a)) + +/* Transmit and Receive Command Registers Bit Definitions + */ +#define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC		BIT(17) +#define ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16	BIT(18) +#define ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16	BIT(25) + +/* Wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct tse_buffer { +	struct list_head lh; +	struct sk_buff *skb; +	dma_addr_t dma_addr; +	u32 len; +	int mapped_as_page; +}; + +struct altera_tse_private; + +#define ALTERA_DTYPE_SGDMA 1 +#define ALTERA_DTYPE_MSGDMA 2 + +/* standard DMA interface for SGDMA and MSGDMA */ +struct altera_dmaops { +	int altera_dtype; +	int dmamask; +	void (*reset_dma)(struct altera_tse_private *); +	void (*enable_txirq)(struct altera_tse_private *); +	void (*enable_rxirq)(struct altera_tse_private *); +	void (*disable_txirq)(struct altera_tse_private *); +	void (*disable_rxirq)(struct altera_tse_private *); +	void (*clear_txirq)(struct altera_tse_private *); +	void (*clear_rxirq)(struct altera_tse_private *); +	int (*tx_buffer)(struct altera_tse_private *, struct tse_buffer *); +	u32 (*tx_completions)(struct altera_tse_private *); +	void (*add_rx_desc)(struct altera_tse_private *, struct tse_buffer *); +	u32 (*get_rx_status)(struct altera_tse_private *); +	int (*init_dma)(struct altera_tse_private *); +	void (*uninit_dma)(struct altera_tse_private *); +	void (*start_rxdma)(struct altera_tse_private *); +}; + +/* This structure is private to each device. 
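The altera_dmaops table above is what keeps the core driver agnostic of the DMA flavour; altera_tse_main.c (later in this patch) supplies one instance per engine, selected at probe time. A hypothetical, abbreviated instance for the mSGDMA back end, built only from the hooks declared in altera_msgdma.h (the dmamask value is an assumption for illustration):

struct altera_dmaops example_msgdma_ops = {
	.altera_dtype	= ALTERA_DTYPE_MSGDMA,
	.dmamask	= 64,	/* assumed 64-bit descriptor addressing */
	.reset_dma	= msgdma_reset,
	.enable_txirq	= msgdma_enable_txirq,
	.enable_rxirq	= msgdma_enable_rxirq,
	.disable_txirq	= msgdma_disable_txirq,
	.disable_rxirq	= msgdma_disable_rxirq,
	.clear_txirq	= msgdma_clear_txirq,
	.clear_rxirq	= msgdma_clear_rxirq,
	.tx_buffer	= msgdma_tx_buffer,
	.tx_completions	= msgdma_tx_completions,
	.add_rx_desc	= msgdma_add_rx_desc,
	.get_rx_status	= msgdma_rx_status,
	.init_dma	= msgdma_initialize,
	.uninit_dma	= msgdma_uninitialize,
	.start_rxdma	= msgdma_start_rxdma,
};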
+ */ +struct altera_tse_private { +	struct net_device *dev; +	struct device *device; +	struct napi_struct napi; + +	/* MAC address space */ +	struct altera_tse_mac __iomem *mac_dev; + +	/* TSE Revision */ +	u32	revision; + +	/* mSGDMA Rx Dispatcher address space */ +	void __iomem *rx_dma_csr; +	void __iomem *rx_dma_desc; +	void __iomem *rx_dma_resp; + +	/* mSGDMA Tx Dispatcher address space */ +	void __iomem *tx_dma_csr; +	void __iomem *tx_dma_desc; + +	/* Rx buffers queue */ +	struct tse_buffer *rx_ring; +	u32 rx_cons; +	u32 rx_prod; +	u32 rx_ring_size; +	u32 rx_dma_buf_sz; + +	/* Tx ring buffer */ +	struct tse_buffer *tx_ring; +	u32 tx_prod; +	u32 tx_cons; +	u32 tx_ring_size; + +	/* Interrupts */ +	u32 tx_irq; +	u32 rx_irq; + +	/* RX/TX MAC FIFO configs */ +	u32 tx_fifo_depth; +	u32 rx_fifo_depth; +	u32 max_mtu; + +	/* Hash filter settings */ +	u32 hash_filter; +	u32 added_unicast; + +	/* Descriptor memory info for managing SGDMA */ +	u32 txdescmem; +	u32 rxdescmem; +	dma_addr_t rxdescmem_busaddr; +	dma_addr_t txdescmem_busaddr; +	u32 txctrlreg; +	u32 rxctrlreg; +	dma_addr_t rxdescphys; +	dma_addr_t txdescphys; +	size_t sgdmadesclen; + +	struct list_head txlisthd; +	struct list_head rxlisthd; + +	/* MAC command_config register protection */ +	spinlock_t mac_cfg_lock; +	/* Tx path protection */ +	spinlock_t tx_lock; +	/* Rx DMA & interrupt control protection */ +	spinlock_t rxdma_irq_lock; + +	/* PHY */ +	int phy_addr;		/* PHY's MDIO address, -1 for autodetection */ +	phy_interface_t phy_iface; +	struct mii_bus *mdio; +	struct phy_device *phydev; +	int oldspeed; +	int oldduplex; +	int oldlink; + +	/* ethtool msglvl option */ +	u32 msg_enable; + +	struct altera_dmaops *dmaops; +}; + +/* Function prototypes + */ +void altera_tse_set_ethtool_ops(struct net_device *); + +static inline +u32 csrrd32(void __iomem *mac, size_t offs) +{ +	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); +	return readl(paddr); +} + +static inline +u16 csrrd16(void __iomem *mac, size_t offs) +{ +	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); +	return readw(paddr); +} + +static inline +u8 csrrd8(void __iomem *mac, size_t offs) +{ +	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); +	return readb(paddr); +} + +static inline +void csrwr32(u32 val, void __iomem *mac, size_t offs) +{ +	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + +	writel(val, paddr); +} + +static inline +void csrwr16(u16 val, void __iomem *mac, size_t offs) +{ +	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + +	writew(val, paddr); +} + +static inline +void csrwr8(u8 val, void __iomem *mac, size_t offs) +{ +	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + +	writeb(val, paddr); +} + +#endif /* __ALTERA_TSE_H__ */ diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c new file mode 100644 index 00000000000..be72e1e6452 --- /dev/null +++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c @@ -0,0 +1,275 @@ +/* Ethtool support for Altera Triple-Speed Ethernet MAC driver + * Copyright (C) 2008-2014 Altera Corporation. All rights reserved + * + * Contributors: + *   Dalon Westergreen + *   Thomas Chou + *   Ian Abbott + *   Yuriy Kozlov + *   Tobias Klauser + *   Andriy Smolskyy + *   Roman Bulgakov + *   Dmytro Mytarchuk + * + * Original driver contributed by SLS. 
+ * Major updates contributed by GlobalLogic + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program.  If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/ethtool.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/phy.h> + +#include "altera_tse.h" + +#define TSE_STATS_LEN	31 +#define TSE_NUM_REGS	128 + +static char const stat_gstrings[][ETH_GSTRING_LEN] = { +	"tx_packets", +	"rx_packets", +	"rx_crc_errors", +	"rx_align_errors", +	"tx_bytes", +	"rx_bytes", +	"tx_pause", +	"rx_pause", +	"rx_errors", +	"tx_errors", +	"rx_unicast", +	"rx_multicast", +	"rx_broadcast", +	"tx_discards", +	"tx_unicast", +	"tx_multicast", +	"tx_broadcast", +	"ether_drops", +	"rx_total_bytes", +	"rx_total_packets", +	"rx_undersize", +	"rx_oversize", +	"rx_64_bytes", +	"rx_65_127_bytes", +	"rx_128_255_bytes", +	"rx_256_511_bytes", +	"rx_512_1023_bytes", +	"rx_1024_1518_bytes", +	"rx_gte_1519_bytes", +	"rx_jabbers", +	"rx_runts", +}; + +static void tse_get_drvinfo(struct net_device *dev, +			    struct ethtool_drvinfo *info) +{ +	struct altera_tse_private *priv = netdev_priv(dev); +	u32 rev = ioread32(&priv->mac_dev->megacore_revision); + +	strcpy(info->driver, "altera_tse"); +	strcpy(info->version, "v8.0"); +	snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "v%d.%d", +		 rev & 0xFFFF, (rev & 0xFFFF0000) >> 16); +	sprintf(info->bus_info, "platform"); +} + +/* Fill in a buffer with the strings which correspond to the + * stats + */ +static void tse_gstrings(struct net_device *dev, u32 stringset, u8 *buf) +{ +	memcpy(buf, stat_gstrings, TSE_STATS_LEN * ETH_GSTRING_LEN); +} + +static void tse_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, +			   u64 *buf) +{ +	struct altera_tse_private *priv = netdev_priv(dev); +	u64 ext; + +	buf[0] = csrrd32(priv->mac_dev, +			 tse_csroffs(frames_transmitted_ok)); +	buf[1] = csrrd32(priv->mac_dev, +			 tse_csroffs(frames_received_ok)); +	buf[2] = csrrd32(priv->mac_dev, +			 tse_csroffs(frames_check_sequence_errors)); +	buf[3] = csrrd32(priv->mac_dev, +			 tse_csroffs(alignment_errors)); + +	/* Extended aOctetsTransmittedOK counter */ +	ext = (u64) csrrd32(priv->mac_dev, +			    tse_csroffs(msb_octets_transmitted_ok)) << 32; + +	ext |= csrrd32(priv->mac_dev, +		       tse_csroffs(octets_transmitted_ok)); +	buf[4] = ext; + +	/* Extended aOctetsReceivedOK counter */ +	ext = (u64) csrrd32(priv->mac_dev, +			    tse_csroffs(msb_octets_received_ok)) << 32; + +	ext |= csrrd32(priv->mac_dev, +		       tse_csroffs(octets_received_ok)); +	buf[5] = ext; + +	buf[6] = csrrd32(priv->mac_dev, +			 tse_csroffs(tx_pause_mac_ctrl_frames)); +	buf[7] = csrrd32(priv->mac_dev, +			 tse_csroffs(rx_pause_mac_ctrl_frames)); +	buf[8] = csrrd32(priv->mac_dev, +			 tse_csroffs(if_in_errors)); +	buf[9] = csrrd32(priv->mac_dev, +			 tse_csroffs(if_out_errors)); +	buf[10] = csrrd32(priv->mac_dev, +			  tse_csroffs(if_in_ucast_pkts)); +	buf[11] = csrrd32(priv->mac_dev, +			  tse_csroffs(if_in_multicast_pkts)); +	buf[12] = csrrd32(priv->mac_dev, +			  
tse_csroffs(if_in_broadcast_pkts)); +	buf[13] = csrrd32(priv->mac_dev, +			  tse_csroffs(if_out_discards)); +	buf[14] = csrrd32(priv->mac_dev, +			  tse_csroffs(if_out_ucast_pkts)); +	buf[15] = csrrd32(priv->mac_dev, +			  tse_csroffs(if_out_multicast_pkts)); +	buf[16] = csrrd32(priv->mac_dev, +			  tse_csroffs(if_out_broadcast_pkts)); +	buf[17] = csrrd32(priv->mac_dev, +			  tse_csroffs(ether_stats_drop_events)); + +	/* Extended etherStatsOctets counter */ +	ext = (u64) csrrd32(priv->mac_dev, +			    tse_csroffs(msb_ether_stats_octets)) << 32; +	ext |= csrrd32(priv->mac_dev, +		       tse_csroffs(ether_stats_octets)); +	buf[18] = ext; + +	buf[19] = csrrd32(priv->mac_dev, +			  tse_csroffs(ether_stats_pkts)); +	buf[20] = csrrd32(priv->mac_dev, +			  tse_csroffs(ether_stats_undersize_pkts)); +	buf[21] = csrrd32(priv->mac_dev, +			  tse_csroffs(ether_stats_oversize_pkts)); +	buf[22] = csrrd32(priv->mac_dev, +			  tse_csroffs(ether_stats_pkts_64_octets)); +	buf[23] = csrrd32(priv->mac_dev, +			  tse_csroffs(ether_stats_pkts_65to127_octets)); +	buf[24] = csrrd32(priv->mac_dev, +			  tse_csroffs(ether_stats_pkts_128to255_octets)); +	buf[25] = csrrd32(priv->mac_dev, +			  tse_csroffs(ether_stats_pkts_256to511_octets)); +	buf[26] = csrrd32(priv->mac_dev, +			  tse_csroffs(ether_stats_pkts_512to1023_octets)); +	buf[27] = csrrd32(priv->mac_dev, +			  tse_csroffs(ether_stats_pkts_1024to1518_octets)); +	buf[28] = csrrd32(priv->mac_dev, +			  tse_csroffs(ether_stats_pkts_1519tox_octets)); +	buf[29] = csrrd32(priv->mac_dev, +			  tse_csroffs(ether_stats_jabbers)); +	buf[30] = csrrd32(priv->mac_dev, +			  tse_csroffs(ether_stats_fragments)); +} + +static int tse_sset_count(struct net_device *dev, int sset) +{ +	switch (sset) { +	case ETH_SS_STATS: +		return TSE_STATS_LEN; +	default: +		return -EOPNOTSUPP; +	} +} + +static u32 tse_get_msglevel(struct net_device *dev) +{ +	struct altera_tse_private *priv = netdev_priv(dev); +	return priv->msg_enable; +} + +static void tse_set_msglevel(struct net_device *dev, uint32_t data) +{ +	struct altera_tse_private *priv = netdev_priv(dev); +	priv->msg_enable = data; +} + +static int tse_reglen(struct net_device *dev) +{ +	return TSE_NUM_REGS * sizeof(u32); +} + +static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs, +			 void *regbuf) +{ +	int i; +	struct altera_tse_private *priv = netdev_priv(dev); +	u32 *buf = regbuf; + +	/* Set version to a known value, so ethtool knows +	 * how to do any special formatting of this data. +	 * This version number will need to change if and +	 * when this register table is changed. +	 * +	 * version[31:0] = 1: Dump the first 128 TSE Registers +	 *      Upper bits are all 0 by default +	 * +	 * Upper 16-bits will indicate feature presence for +	 * Ethtool register decoding in future version. 
+	 */ + +	regs->version = 1; + +	for (i = 0; i < TSE_NUM_REGS; i++) +		buf[i] = csrrd32(priv->mac_dev, i * 4); +} + +static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ +	struct altera_tse_private *priv = netdev_priv(dev); +	struct phy_device *phydev = priv->phydev; + +	if (phydev == NULL) +		return -ENODEV; + +	return phy_ethtool_gset(phydev, cmd); +} + +static int tse_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ +	struct altera_tse_private *priv = netdev_priv(dev); +	struct phy_device *phydev = priv->phydev; + +	if (phydev == NULL) +		return -ENODEV; + +	return phy_ethtool_sset(phydev, cmd); +} + +static const struct ethtool_ops tse_ethtool_ops = { +	.get_drvinfo = tse_get_drvinfo, +	.get_regs_len = tse_reglen, +	.get_regs = tse_get_regs, +	.get_link = ethtool_op_get_link, +	.get_settings = tse_get_settings, +	.set_settings = tse_set_settings, +	.get_strings = tse_gstrings, +	.get_sset_count = tse_sset_count, +	.get_ethtool_stats = tse_fill_stats, +	.get_msglevel = tse_get_msglevel, +	.set_msglevel = tse_set_msglevel, +}; + +void altera_tse_set_ethtool_ops(struct net_device *netdev) +{ +	netdev->ethtool_ops = &tse_ethtool_ops; +} diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c new file mode 100644 index 00000000000..7330681574d --- /dev/null +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -0,0 +1,1577 @@ +/* Altera Triple-Speed Ethernet MAC driver + * Copyright (C) 2008-2014 Altera Corporation. All rights reserved + * + * Contributors: + *   Dalon Westergreen + *   Thomas Chou + *   Ian Abbott + *   Yuriy Kozlov + *   Tobias Klauser + *   Andriy Smolskyy + *   Roman Bulgakov + *   Dmytro Mytarchuk + *   Matthew Gerlach + * + * Original driver contributed by SLS. + * Major updates contributed by GlobalLogic + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program.  If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/atomic.h> +#include <linux/delay.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/of_device.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/of_platform.h> +#include <linux/phy.h> +#include <linux/platform_device.h> +#include <linux/skbuff.h> +#include <asm/cacheflush.h> + +#include "altera_utils.h" +#include "altera_tse.h" +#include "altera_sgdma.h" +#include "altera_msgdma.h" + +static atomic_t instance_count = ATOMIC_INIT(~0); +/* Module parameters */ +static int debug = -1; +module_param(debug, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)"); + +static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | +					NETIF_MSG_LINK | NETIF_MSG_IFUP | +					NETIF_MSG_IFDOWN); + +#define RX_DESCRIPTORS 64 +static int dma_rx_num = RX_DESCRIPTORS; +module_param(dma_rx_num, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(dma_rx_num, "Number of descriptors in the RX list"); + +#define TX_DESCRIPTORS 64 +static int dma_tx_num = TX_DESCRIPTORS; +module_param(dma_tx_num, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list"); + + +#define POLL_PHY (-1) + +/* Make sure DMA buffer size is larger than the max frame size + * plus some alignment offset and a VLAN header. If the max frame size is + * 1518, a VLAN header would be additional 4 bytes and additional + * headroom for alignment is 2 bytes, 2048 is just fine. + */ +#define ALTERA_RXDMABUFFER_SIZE	2048 + +/* Allow network stack to resume queueing packets after we've + * finished transmitting at least 1/4 of the packets in the queue. 
+ */ +#define TSE_TX_THRESH(x)	(x->tx_ring_size / 4) + +#define TXQUEUESTOP_THRESHHOLD	2 + +static struct of_device_id altera_tse_ids[]; + +static inline u32 tse_tx_avail(struct altera_tse_private *priv) +{ +	return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1; +} + +/* MDIO specific functions + */ +static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum) +{ +	struct net_device *ndev = bus->priv; +	struct altera_tse_private *priv = netdev_priv(ndev); + +	/* set MDIO address */ +	csrwr32((mii_id & 0x1f), priv->mac_dev, +		tse_csroffs(mdio_phy0_addr)); + +	/* get the data */ +	return csrrd32(priv->mac_dev, +		       tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff; +} + +static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum, +				 u16 value) +{ +	struct net_device *ndev = bus->priv; +	struct altera_tse_private *priv = netdev_priv(ndev); + +	/* set MDIO address */ +	csrwr32((mii_id & 0x1f), priv->mac_dev, +		tse_csroffs(mdio_phy0_addr)); + +	/* write the data */ +	csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4); +	return 0; +} + +static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) +{ +	struct altera_tse_private *priv = netdev_priv(dev); +	int ret; +	int i; +	struct device_node *mdio_node = NULL; +	struct mii_bus *mdio = NULL; +	struct device_node *child_node = NULL; + +	for_each_child_of_node(priv->device->of_node, child_node) { +		if (of_device_is_compatible(child_node, "altr,tse-mdio")) { +			mdio_node = child_node; +			break; +		} +	} + +	if (mdio_node) { +		netdev_dbg(dev, "FOUND MDIO subnode\n"); +	} else { +		netdev_dbg(dev, "NO MDIO subnode\n"); +		return 0; +	} + +	mdio = mdiobus_alloc(); +	if (mdio == NULL) { +		netdev_err(dev, "Error allocating MDIO bus\n"); +		return -ENOMEM; +	} + +	mdio->name = ALTERA_TSE_RESOURCE_NAME; +	mdio->read = &altera_tse_mdio_read; +	mdio->write = &altera_tse_mdio_write; +	snprintf(mdio->id, MII_BUS_ID_SIZE, "%s-%u", mdio->name, id); + +	mdio->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); +	if (mdio->irq == NULL) { +		ret = -ENOMEM; +		goto out_free_mdio; +	} +	for (i = 0; i < PHY_MAX_ADDR; i++) +		mdio->irq[i] = PHY_POLL; + +	mdio->priv = dev; +	mdio->parent = priv->device; + +	ret = of_mdiobus_register(mdio, mdio_node); +	if (ret != 0) { +		netdev_err(dev, "Cannot register MDIO bus %s\n", +			   mdio->id); +		goto out_free_mdio_irq; +	} + +	if (netif_msg_drv(priv)) +		netdev_info(dev, "MDIO bus %s: created\n", mdio->id); + +	priv->mdio = mdio; +	return 0; +out_free_mdio_irq: +	kfree(mdio->irq); +out_free_mdio: +	mdiobus_free(mdio); +	mdio = NULL; +	return ret; +} + +static void altera_tse_mdio_destroy(struct net_device *dev) +{ +	struct altera_tse_private *priv = netdev_priv(dev); + +	if (priv->mdio == NULL) +		return; + +	if (netif_msg_drv(priv)) +		netdev_info(dev, "MDIO bus %s: removed\n", +			    priv->mdio->id); + +	mdiobus_unregister(priv->mdio); +	kfree(priv->mdio->irq); +	mdiobus_free(priv->mdio); +	priv->mdio = NULL; +} + +static int tse_init_rx_buffer(struct altera_tse_private *priv, +			      struct tse_buffer *rxbuffer, int len) +{ +	rxbuffer->skb = netdev_alloc_skb_ip_align(priv->dev, len); +	if (!rxbuffer->skb) +		return -ENOMEM; + +	rxbuffer->dma_addr = dma_map_single(priv->device, rxbuffer->skb->data, +						len, +						DMA_FROM_DEVICE); + +	if (dma_mapping_error(priv->device, rxbuffer->dma_addr)) { +		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__); +		dev_kfree_skb_any(rxbuffer->skb); +		return -EINVAL; +	} +	
rxbuffer->dma_addr &= (dma_addr_t)~3; +	rxbuffer->len = len; +	return 0; +} + +static void tse_free_rx_buffer(struct altera_tse_private *priv, +			       struct tse_buffer *rxbuffer) +{ +	struct sk_buff *skb = rxbuffer->skb; +	dma_addr_t dma_addr = rxbuffer->dma_addr; + +	if (skb != NULL) { +		if (dma_addr) +			dma_unmap_single(priv->device, dma_addr, +					 rxbuffer->len, +					 DMA_FROM_DEVICE); +		dev_kfree_skb_any(skb); +		rxbuffer->skb = NULL; +		rxbuffer->dma_addr = 0; +	} +} + +/* Unmap and free Tx buffer resources + */ +static void tse_free_tx_buffer(struct altera_tse_private *priv, +			       struct tse_buffer *buffer) +{ +	if (buffer->dma_addr) { +		if (buffer->mapped_as_page) +			dma_unmap_page(priv->device, buffer->dma_addr, +				       buffer->len, DMA_TO_DEVICE); +		else +			dma_unmap_single(priv->device, buffer->dma_addr, +					 buffer->len, DMA_TO_DEVICE); +		buffer->dma_addr = 0; +	} +	if (buffer->skb) { +		dev_kfree_skb_any(buffer->skb); +		buffer->skb = NULL; +	} +} + +static int alloc_init_skbufs(struct altera_tse_private *priv) +{ +	unsigned int rx_descs = priv->rx_ring_size; +	unsigned int tx_descs = priv->tx_ring_size; +	int ret = -ENOMEM; +	int i; + +	/* Create Rx ring buffer */ +	priv->rx_ring = kcalloc(rx_descs, sizeof(struct tse_buffer), +				GFP_KERNEL); +	if (!priv->rx_ring) +		goto err_rx_ring; + +	/* Create Tx ring buffer */ +	priv->tx_ring = kcalloc(tx_descs, sizeof(struct tse_buffer), +				GFP_KERNEL); +	if (!priv->tx_ring) +		goto err_tx_ring; + +	priv->tx_cons = 0; +	priv->tx_prod = 0; + +	/* Init Rx ring */ +	for (i = 0; i < rx_descs; i++) { +		ret = tse_init_rx_buffer(priv, &priv->rx_ring[i], +					 priv->rx_dma_buf_sz); +		if (ret) +			goto err_init_rx_buffers; +	} + +	priv->rx_cons = 0; +	priv->rx_prod = 0; + +	return 0; +err_init_rx_buffers: +	while (--i >= 0) +		tse_free_rx_buffer(priv, &priv->rx_ring[i]); +	kfree(priv->tx_ring); +err_tx_ring: +	kfree(priv->rx_ring); +err_rx_ring: +	return ret; +} + +static void free_skbufs(struct net_device *dev) +{ +	struct altera_tse_private *priv = netdev_priv(dev); +	unsigned int rx_descs = priv->rx_ring_size; +	unsigned int tx_descs = priv->tx_ring_size; +	int i; + +	/* Release the DMA TX/RX socket buffers */ +	for (i = 0; i < rx_descs; i++) +		tse_free_rx_buffer(priv, &priv->rx_ring[i]); +	for (i = 0; i < tx_descs; i++) +		tse_free_tx_buffer(priv, &priv->tx_ring[i]); + + +	kfree(priv->tx_ring); +} + +/* Reallocate the skb for the reception process + */ +static inline void tse_rx_refill(struct altera_tse_private *priv) +{ +	unsigned int rxsize = priv->rx_ring_size; +	unsigned int entry; +	int ret; + +	for (; priv->rx_cons - priv->rx_prod > 0; +			priv->rx_prod++) { +		entry = priv->rx_prod % rxsize; +		if (likely(priv->rx_ring[entry].skb == NULL)) { +			ret = tse_init_rx_buffer(priv, &priv->rx_ring[entry], +				priv->rx_dma_buf_sz); +			if (unlikely(ret != 0)) +				break; +			priv->dmaops->add_rx_desc(priv, &priv->rx_ring[entry]); +		} +	} +} + +/* Pull out the VLAN tag and fix up the packet + */ +static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb) +{ +	struct ethhdr *eth_hdr; +	u16 vid; +	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && +	    !__vlan_get_tag(skb, &vid)) { +		eth_hdr = (struct ethhdr *)skb->data; +		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2); +		skb_pull(skb, VLAN_HLEN); +		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); +	} +} + +/* Receive a packet: retrieve and pass over to upper levels + */ +static int tse_rx(struct altera_tse_private *priv, int 
limit) +{ +	unsigned int count = 0; +	unsigned int next_entry; +	struct sk_buff *skb; +	unsigned int entry = priv->rx_cons % priv->rx_ring_size; +	u32 rxstatus; +	u16 pktlength; +	u16 pktstatus; + +	while ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) { +		pktstatus = rxstatus >> 16; +		pktlength = rxstatus & 0xffff; + +		if ((pktstatus & 0xFF) || (pktlength == 0)) +			netdev_err(priv->dev, +				   "RCV pktstatus %08X pktlength %08X\n", +				   pktstatus, pktlength); + +		count++; +		next_entry = (++priv->rx_cons) % priv->rx_ring_size; + +		skb = priv->rx_ring[entry].skb; +		if (unlikely(!skb)) { +			netdev_err(priv->dev, +				   "%s: Inconsistent Rx descriptor chain\n", +				   __func__); +			priv->dev->stats.rx_dropped++; +			break; +		} +		priv->rx_ring[entry].skb = NULL; + +		skb_put(skb, pktlength); + +		/* make cache consistent with receive packet buffer */ +		dma_sync_single_for_cpu(priv->device, +					priv->rx_ring[entry].dma_addr, +					priv->rx_ring[entry].len, +					DMA_FROM_DEVICE); + +		dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr, +				 priv->rx_ring[entry].len, DMA_FROM_DEVICE); + +		if (netif_msg_pktdata(priv)) { +			netdev_info(priv->dev, "frame received %d bytes\n", +				    pktlength); +			print_hex_dump(KERN_ERR, "data: ", DUMP_PREFIX_OFFSET, +				       16, 1, skb->data, pktlength, true); +		} + +		tse_rx_vlan(priv->dev, skb); + +		skb->protocol = eth_type_trans(skb, priv->dev); +		skb_checksum_none_assert(skb); + +		napi_gro_receive(&priv->napi, skb); + +		priv->dev->stats.rx_packets++; +		priv->dev->stats.rx_bytes += pktlength; + +		entry = next_entry; + +		tse_rx_refill(priv); +	} + +	return count; +} + +/* Reclaim resources after transmission completes + */ +static int tse_tx_complete(struct altera_tse_private *priv) +{ +	unsigned int txsize = priv->tx_ring_size; +	u32 ready; +	unsigned int entry; +	struct tse_buffer *tx_buff; +	int txcomplete = 0; + +	spin_lock(&priv->tx_lock); + +	ready = priv->dmaops->tx_completions(priv); + +	/* Free sent buffers */ +	while (ready && (priv->tx_cons != priv->tx_prod)) { +		entry = priv->tx_cons % txsize; +		tx_buff = &priv->tx_ring[entry]; + +		if (netif_msg_tx_done(priv)) +			netdev_dbg(priv->dev, "%s: curr %d, dirty %d\n", +				   __func__, priv->tx_prod, priv->tx_cons); + +		if (likely(tx_buff->skb)) +			priv->dev->stats.tx_packets++; + +		tse_free_tx_buffer(priv, tx_buff); +		priv->tx_cons++; + +		txcomplete++; +		ready--; +	} + +	if (unlikely(netif_queue_stopped(priv->dev) && +		     tse_tx_avail(priv) > TSE_TX_THRESH(priv))) { +		netif_tx_lock(priv->dev); +		if (netif_queue_stopped(priv->dev) && +		    tse_tx_avail(priv) > TSE_TX_THRESH(priv)) { +			if (netif_msg_tx_done(priv)) +				netdev_dbg(priv->dev, "%s: restart transmit\n", +					   __func__); +			netif_wake_queue(priv->dev); +		} +		netif_tx_unlock(priv->dev); +	} + +	spin_unlock(&priv->tx_lock); +	return txcomplete; +} + +/* NAPI polling function + */ +static int tse_poll(struct napi_struct *napi, int budget) +{ +	struct altera_tse_private *priv = +			container_of(napi, struct altera_tse_private, napi); +	int rxcomplete = 0; +	int txcomplete = 0; +	unsigned long int flags; + +	txcomplete = tse_tx_complete(priv); + +	rxcomplete = tse_rx(priv, budget); + +	if (rxcomplete >= budget || txcomplete > 0) +		return rxcomplete; + +	napi_gro_flush(napi, false); +	__napi_complete(napi); + +	netdev_dbg(priv->dev, +		   "NAPI Complete, did %d packets with budget %d\n", +		   txcomplete+rxcomplete, budget); + +	spin_lock_irqsave(&priv->rxdma_irq_lock, 
flags); +	priv->dmaops->enable_rxirq(priv); +	priv->dmaops->enable_txirq(priv); +	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); +	return rxcomplete + txcomplete; +} + +/* DMA TX & RX FIFO interrupt routing + */ +static irqreturn_t altera_isr(int irq, void *dev_id) +{ +	struct net_device *dev = dev_id; +	struct altera_tse_private *priv; +	unsigned long int flags; + +	if (unlikely(!dev)) { +		pr_err("%s: invalid dev pointer\n", __func__); +		return IRQ_NONE; +	} +	priv = netdev_priv(dev); + +	/* turn off desc irqs and enable napi rx */ +	spin_lock_irqsave(&priv->rxdma_irq_lock, flags); + +	if (likely(napi_schedule_prep(&priv->napi))) { +		priv->dmaops->disable_rxirq(priv); +		priv->dmaops->disable_txirq(priv); +		__napi_schedule(&priv->napi); +	} + +	/* reset IRQs */ +	priv->dmaops->clear_rxirq(priv); +	priv->dmaops->clear_txirq(priv); + +	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); + +	return IRQ_HANDLED; +} + +/* Transmit a packet (called by the kernel). Dispatches + * either the SGDMA method for transmitting or the + * MSGDMA method, assumes no scatter/gather support, + * implying an assumption that there's only one + * physically contiguous fragment starting at + * skb->data, for length of skb_headlen(skb). + */ +static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ +	struct altera_tse_private *priv = netdev_priv(dev); +	unsigned int txsize = priv->tx_ring_size; +	unsigned int entry; +	struct tse_buffer *buffer = NULL; +	int nfrags = skb_shinfo(skb)->nr_frags; +	unsigned int nopaged_len = skb_headlen(skb); +	enum netdev_tx ret = NETDEV_TX_OK; +	dma_addr_t dma_addr; + +	spin_lock_bh(&priv->tx_lock); + +	if (unlikely(tse_tx_avail(priv) < nfrags + 1)) { +		if (!netif_queue_stopped(dev)) { +			netif_stop_queue(dev); +			/* This is a hard error, log it. */ +			netdev_err(priv->dev, +				   "%s: Tx list full when queue awake\n", +				   __func__); +		} +		ret = NETDEV_TX_BUSY; +		goto out; +	} + +	/* Map the first skb fragment */ +	entry = priv->tx_prod % txsize; +	buffer = &priv->tx_ring[entry]; + +	dma_addr = dma_map_single(priv->device, skb->data, nopaged_len, +				  DMA_TO_DEVICE); +	if (dma_mapping_error(priv->device, dma_addr)) { +		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__); +		ret = NETDEV_TX_OK; +		goto out; +	} + +	buffer->skb = skb; +	buffer->dma_addr = dma_addr; +	buffer->len = nopaged_len; + +	/* Push data out of the cache hierarchy into main memory */ +	dma_sync_single_for_device(priv->device, buffer->dma_addr, +				   buffer->len, DMA_TO_DEVICE); + +	priv->dmaops->tx_buffer(priv, buffer); + +	skb_tx_timestamp(skb); + +	priv->tx_prod++; +	dev->stats.tx_bytes += skb->len; + +	if (unlikely(tse_tx_avail(priv) <= TXQUEUESTOP_THRESHHOLD)) { +		if (netif_msg_hw(priv)) +			netdev_dbg(priv->dev, "%s: stop transmitted packets\n", +				   __func__); +		netif_stop_queue(dev); +	} + +out: +	spin_unlock_bh(&priv->tx_lock); + +	return ret; +} + +/* Called every time the controller might need to be made + * aware of new link state.  The PHY code conveys this + * information through variables in the phydev structure, and this + * function converts those variables into the appropriate + * register values, and can bring down the device if needed. 
+ */ +static void altera_tse_adjust_link(struct net_device *dev) +{ +	struct altera_tse_private *priv = netdev_priv(dev); +	struct phy_device *phydev = priv->phydev; +	int new_state = 0; + +	/* only change config if there is a link */ +	spin_lock(&priv->mac_cfg_lock); +	if (phydev->link) { +		/* Read old config */ +		u32 cfg_reg = ioread32(&priv->mac_dev->command_config); + +		/* Check duplex */ +		if (phydev->duplex != priv->oldduplex) { +			new_state = 1; +			if (!(phydev->duplex)) +				cfg_reg |= MAC_CMDCFG_HD_ENA; +			else +				cfg_reg &= ~MAC_CMDCFG_HD_ENA; + +			netdev_dbg(priv->dev, "%s: Link duplex = 0x%x\n", +				   dev->name, phydev->duplex); + +			priv->oldduplex = phydev->duplex; +		} + +		/* Check speed */ +		if (phydev->speed != priv->oldspeed) { +			new_state = 1; +			switch (phydev->speed) { +			case 1000: +				cfg_reg |= MAC_CMDCFG_ETH_SPEED; +				cfg_reg &= ~MAC_CMDCFG_ENA_10; +				break; +			case 100: +				cfg_reg &= ~MAC_CMDCFG_ETH_SPEED; +				cfg_reg &= ~MAC_CMDCFG_ENA_10; +				break; +			case 10: +				cfg_reg &= ~MAC_CMDCFG_ETH_SPEED; +				cfg_reg |= MAC_CMDCFG_ENA_10; +				break; +			default: +				if (netif_msg_link(priv)) +					netdev_warn(dev, "Speed (%d) is not 10/100/1000!\n", +						    phydev->speed); +				break; +			} +			priv->oldspeed = phydev->speed; +		} +		iowrite32(cfg_reg, &priv->mac_dev->command_config); + +		if (!priv->oldlink) { +			new_state = 1; +			priv->oldlink = 1; +		} +	} else if (priv->oldlink) { +		new_state = 1; +		priv->oldlink = 0; +		priv->oldspeed = 0; +		priv->oldduplex = -1; +	} + +	if (new_state && netif_msg_link(priv)) +		phy_print_status(phydev); + +	spin_unlock(&priv->mac_cfg_lock); +} +static struct phy_device *connect_local_phy(struct net_device *dev) +{ +	struct altera_tse_private *priv = netdev_priv(dev); +	struct phy_device *phydev = NULL; +	char phy_id_fmt[MII_BUS_ID_SIZE + 3]; + +	if (priv->phy_addr != POLL_PHY) { +		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, +			 priv->mdio->id, priv->phy_addr); + +		netdev_dbg(dev, "trying to attach to %s\n", phy_id_fmt); + +		phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link, +				     priv->phy_iface); +		if (IS_ERR(phydev)) +			netdev_err(dev, "Could not attach to PHY\n"); + +	} else { +		int ret; +		phydev = phy_find_first(priv->mdio); +		if (phydev == NULL) { +			netdev_err(dev, "No PHY found\n"); +			return phydev; +		} + +		ret = phy_connect_direct(dev, phydev, &altera_tse_adjust_link, +				priv->phy_iface); +		if (ret != 0) { +			netdev_err(dev, "Could not attach to PHY\n"); +			phydev = NULL; +		} +	} +	return phydev; +} + +/* Initialize driver's PHY state, and attach to the PHY + */ +static int init_phy(struct net_device *dev) +{ +	struct altera_tse_private *priv = netdev_priv(dev); +	struct phy_device *phydev; +	struct device_node *phynode; + +	priv->oldlink = 0; +	priv->oldspeed = 0; +	priv->oldduplex = -1; + +	phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0); + +	if (!phynode) { +		netdev_dbg(dev, "no phy-handle found\n"); +		if (!priv->mdio) { +			netdev_err(dev, +				   "No phy-handle nor local mdio specified\n"); +			return -ENODEV; +		} +		phydev = connect_local_phy(dev); +	} else { +		netdev_dbg(dev, "phy-handle found\n"); +		phydev = of_phy_connect(dev, phynode, +			&altera_tse_adjust_link, 0, priv->phy_iface); +	} + +	if (!phydev) { +		netdev_err(dev, "Could not find the PHY\n"); +		return -ENODEV; +	} + +	/* Stop Advertising 1000BASE Capability if interface is not GMII +	 * Note: Checkpatch throws CHECKs for the camel case 
defines below, +	 * it's ok to ignore. +	 */ +	if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) || +	    (priv->phy_iface == PHY_INTERFACE_MODE_RMII)) +		phydev->advertising &= ~(SUPPORTED_1000baseT_Half | +					 SUPPORTED_1000baseT_Full); + +	/* Broken HW is sometimes missing the pull-up resistor on the +	 * MDIO line, which results in reads to non-existent devices returning +	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent +	 * device as well. +	 * Note: phydev->phy_id is the result of reading the UID PHY registers. +	 */ +	if (phydev->phy_id == 0) { +		netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id); +		phy_disconnect(phydev); +		return -ENODEV; +	} + +	netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n", +		   phydev->addr, phydev->phy_id, phydev->link); + +	priv->phydev = phydev; +	return 0; +} + +static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr) +{ +	u32 msb; +	u32 lsb; + +	msb = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; +	lsb = ((addr[5] << 8) | addr[4]) & 0xffff; + +	/* Set primary MAC address */ +	csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0)); +	csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1)); +} + +/* MAC software reset. + * When reset is triggered, the MAC function completes the current + * transmission or reception, and subsequently disables the transmit and + * receive logic, flushes the receive FIFO buffer, and resets the statistics + * counters. + */ +static int reset_mac(struct altera_tse_private *priv) +{ +	int counter; +	u32 dat; + +	dat = csrrd32(priv->mac_dev, tse_csroffs(command_config)); +	dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA); +	dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET; +	csrwr32(dat, priv->mac_dev, tse_csroffs(command_config)); + +	counter = 0; +	while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { +		if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config), +				     MAC_CMDCFG_SW_RESET)) +			break; +		udelay(1); +	} + +	if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { +		dat = csrrd32(priv->mac_dev, tse_csroffs(command_config)); +		dat &= ~MAC_CMDCFG_SW_RESET; +		csrwr32(dat, priv->mac_dev, tse_csroffs(command_config)); +		return -1; +	} +	return 0; +} + +/* Initialize MAC core registers +*/ +static int init_mac(struct altera_tse_private *priv) +{ +	unsigned int cmd = 0; +	u32 frm_length; + +	/* Setup Rx FIFO */ +	csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY, +		priv->mac_dev, tse_csroffs(rx_section_empty)); + +	csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev, +		tse_csroffs(rx_section_full)); + +	csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev, +		tse_csroffs(rx_almost_empty)); + +	csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev, +		tse_csroffs(rx_almost_full)); + +	/* Setup Tx FIFO */ +	csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY, +		priv->mac_dev, tse_csroffs(tx_section_empty)); + +	csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev, +		tse_csroffs(tx_section_full)); + +	csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev, +		tse_csroffs(tx_almost_empty)); + +	csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev, +		tse_csroffs(tx_almost_full)); + +	/* MAC Address Configuration */ +	tse_update_mac_addr(priv, priv->dev->dev_addr); + +	/* MAC Function Configuration */ +	frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN; +	csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length)); + +	csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev, +		tse_csroffs(tx_ipg_length)); + +	/* Disable RX/TX shift 16 for alignment of 
all received frames on 16-bit +	 * start address +	 */ +	tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat), +		    ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16); + +	tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat), +		      ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 | +		      ALTERA_TSE_TX_CMD_STAT_OMIT_CRC); + +	/* Set the MAC options */ +	cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config)); +	cmd &= ~MAC_CMDCFG_PAD_EN;	/* No padding Removal on Receive */ +	cmd &= ~MAC_CMDCFG_CRC_FWD;	/* CRC Removal */ +	cmd |= MAC_CMDCFG_RX_ERR_DISC;	/* Automatically discard frames +					 * with CRC errors +					 */ +	cmd |= MAC_CMDCFG_CNTL_FRM_ENA; +	cmd &= ~MAC_CMDCFG_TX_ENA; +	cmd &= ~MAC_CMDCFG_RX_ENA; + +	/* Default speed and duplex setting, full/100 */ +	cmd &= ~MAC_CMDCFG_HD_ENA; +	cmd &= ~MAC_CMDCFG_ETH_SPEED; +	cmd &= ~MAC_CMDCFG_ENA_10; + +	csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config)); + +	csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev, +		tse_csroffs(pause_quanta)); + +	if (netif_msg_hw(priv)) +		dev_dbg(priv->device, +			"MAC post-initialization: CMD_CONFIG = 0x%08x\n", cmd); + +	return 0; +} + +/* Start/stop MAC transmission logic + */ +static void tse_set_mac(struct altera_tse_private *priv, bool enable) +{ +	u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config)); + +	if (enable) +		value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA; +	else +		value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA); + +	csrwr32(value, priv->mac_dev, tse_csroffs(command_config)); +} + +/* Change the MTU + */ +static int tse_change_mtu(struct net_device *dev, int new_mtu) +{ +	struct altera_tse_private *priv = netdev_priv(dev); +	unsigned int max_mtu = priv->max_mtu; +	unsigned int min_mtu = ETH_ZLEN + ETH_FCS_LEN; + +	if (netif_running(dev)) { +		netdev_err(dev, "must be stopped to change its MTU\n"); +		return -EBUSY; +	} + +	if ((new_mtu < min_mtu) || (new_mtu > max_mtu)) { +		netdev_err(dev, "invalid MTU, max MTU is: %u\n", max_mtu); +		return -EINVAL; +	} + +	dev->mtu = new_mtu; +	netdev_update_features(dev); + +	return 0; +} + +static void altera_tse_set_mcfilter(struct net_device *dev) +{ +	struct altera_tse_private *priv = netdev_priv(dev); +	int i; +	struct netdev_hw_addr *ha; + +	/* clear the hash filter */ +	for (i = 0; i < 64; i++) +		csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4); + +	netdev_for_each_mc_addr(ha, dev) { +		unsigned int hash = 0; +		int mac_octet; + +		for (mac_octet = 5; mac_octet >= 0; mac_octet--) { +			unsigned char xor_bit = 0; +			unsigned char octet = ha->addr[mac_octet]; +			unsigned int bitshift; + +			for (bitshift = 0; bitshift < 8; bitshift++) +				xor_bit ^= ((octet >> bitshift) & 0x01); + +			hash = (hash << 1) | xor_bit; +		} +		csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4); +	} +} + + +static void altera_tse_set_mcfilterall(struct net_device *dev) +{ +	struct altera_tse_private *priv = netdev_priv(dev); +	int i; + +	/* set the hash filter */ +	for (i = 0; i < 64; i++) +		csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4); +} + +/* Set or clear the multicast filter for this adaptor + */ +static void tse_set_rx_mode_hashfilter(struct net_device *dev) +{ +	struct altera_tse_private *priv = netdev_priv(dev); + +	spin_lock(&priv->mac_cfg_lock); + +	if (dev->flags & IFF_PROMISC) +		tse_set_bit(priv->mac_dev, tse_csroffs(command_config), +			    MAC_CMDCFG_PROMIS_EN); + +	if (dev->flags & IFF_ALLMULTI) +		altera_tse_set_mcfilterall(dev); +	else +		altera_tse_set_mcfilter(dev); + +	spin_unlock(&priv->mac_cfg_lock); +} + +/* Set or 
clear the multicast filter for this adaptor + */ +static void tse_set_rx_mode(struct net_device *dev) +{ +	struct altera_tse_private *priv = netdev_priv(dev); + +	spin_lock(&priv->mac_cfg_lock); + +	if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) || +	    !netdev_mc_empty(dev) || !netdev_uc_empty(dev)) +		tse_set_bit(priv->mac_dev, tse_csroffs(command_config), +			    MAC_CMDCFG_PROMIS_EN); +	else +		tse_clear_bit(priv->mac_dev, tse_csroffs(command_config), +			      MAC_CMDCFG_PROMIS_EN); + +	spin_unlock(&priv->mac_cfg_lock); +} + +/* Open and initialize the interface + */ +static int tse_open(struct net_device *dev) +{ +	struct altera_tse_private *priv = netdev_priv(dev); +	int ret = 0; +	int i; +	unsigned long int flags; + +	/* Reset and configure TSE MAC and probe associated PHY */ +	ret = priv->dmaops->init_dma(priv); +	if (ret != 0) { +		netdev_err(dev, "Cannot initialize DMA\n"); +		goto phy_error; +	} + +	if (netif_msg_ifup(priv)) +		netdev_warn(dev, "device MAC address %pM\n", +			    dev->dev_addr); + +	if ((priv->revision < 0xd00) || (priv->revision > 0xe00)) +		netdev_warn(dev, "TSE revision %x\n", priv->revision); + +	spin_lock(&priv->mac_cfg_lock); +	ret = reset_mac(priv); +	if (ret) +		netdev_err(dev, "Cannot reset MAC core (error: %d)\n", ret); + +	ret = init_mac(priv); +	spin_unlock(&priv->mac_cfg_lock); +	if (ret) { +		netdev_err(dev, "Cannot init MAC core (error: %d)\n", ret); +		goto alloc_skbuf_error; +	} + +	priv->dmaops->reset_dma(priv); + +	/* Create and initialize the TX/RX descriptors chains. */ +	priv->rx_ring_size = dma_rx_num; +	priv->tx_ring_size = dma_tx_num; +	ret = alloc_init_skbufs(priv); +	if (ret) { +		netdev_err(dev, "DMA descriptors initialization failed\n"); +		goto alloc_skbuf_error; +	} + + +	/* Register RX interrupt */ +	ret = request_irq(priv->rx_irq, altera_isr, IRQF_SHARED, +			  dev->name, dev); +	if (ret) { +		netdev_err(dev, "Unable to register RX interrupt %d\n", +			   priv->rx_irq); +		goto init_error; +	} + +	/* Register TX interrupt */ +	ret = request_irq(priv->tx_irq, altera_isr, IRQF_SHARED, +			  dev->name, dev); +	if (ret) { +		netdev_err(dev, "Unable to register TX interrupt %d\n", +			   priv->tx_irq); +		goto tx_request_irq_error; +	} + +	/* Enable DMA interrupts */ +	spin_lock_irqsave(&priv->rxdma_irq_lock, flags); +	priv->dmaops->enable_rxirq(priv); +	priv->dmaops->enable_txirq(priv); + +	/* Setup RX descriptor chain */ +	for (i = 0; i < priv->rx_ring_size; i++) +		priv->dmaops->add_rx_desc(priv, &priv->rx_ring[i]); + +	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); + +	if (priv->phydev) +		phy_start(priv->phydev); + +	napi_enable(&priv->napi); +	netif_start_queue(dev); + +	priv->dmaops->start_rxdma(priv); + +	/* Start MAC Rx/Tx */ +	spin_lock(&priv->mac_cfg_lock); +	tse_set_mac(priv, true); +	spin_unlock(&priv->mac_cfg_lock); + +	return 0; + +tx_request_irq_error: +	free_irq(priv->rx_irq, dev); +init_error: +	free_skbufs(dev); +alloc_skbuf_error: +	if (priv->phydev) { +		phy_disconnect(priv->phydev); +		priv->phydev = NULL; +	} +phy_error: +	return ret; +} + +/* Stop TSE MAC interface and put the device in an inactive state + */ +static int tse_shutdown(struct net_device *dev) +{ +	struct altera_tse_private *priv = netdev_priv(dev); +	int ret; +	unsigned long int flags; + +	/* Stop and disconnect the PHY */ +	if (priv->phydev) { +		phy_stop(priv->phydev); +		phy_disconnect(priv->phydev); +		priv->phydev = NULL; +	} + +	netif_stop_queue(dev); +	napi_disable(&priv->napi); + +	/* Disable DMA interrupts */ +	
spin_lock_irqsave(&priv->rxdma_irq_lock, flags); +	priv->dmaops->disable_rxirq(priv); +	priv->dmaops->disable_txirq(priv); +	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); + +	/* Free the IRQ lines */ +	free_irq(priv->rx_irq, dev); +	free_irq(priv->tx_irq, dev); + +	/* disable and reset the MAC, empties fifo */ +	spin_lock(&priv->mac_cfg_lock); +	spin_lock(&priv->tx_lock); + +	ret = reset_mac(priv); +	if (ret) +		netdev_err(dev, "Cannot reset MAC core (error: %d)\n", ret); +	priv->dmaops->reset_dma(priv); +	free_skbufs(dev); + +	spin_unlock(&priv->tx_lock); +	spin_unlock(&priv->mac_cfg_lock); + +	priv->dmaops->uninit_dma(priv); + +	return 0; +} + +static struct net_device_ops altera_tse_netdev_ops = { +	.ndo_open		= tse_open, +	.ndo_stop		= tse_shutdown, +	.ndo_start_xmit		= tse_start_xmit, +	.ndo_set_mac_address	= eth_mac_addr, +	.ndo_set_rx_mode	= tse_set_rx_mode, +	.ndo_change_mtu		= tse_change_mtu, +	.ndo_validate_addr	= eth_validate_addr, +}; + +static int request_and_map(struct platform_device *pdev, const char *name, +			   struct resource **res, void __iomem **ptr) +{ +	struct resource *region; +	struct device *device = &pdev->dev; + +	*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); +	if (*res == NULL) { +		dev_err(device, "resource %s not defined\n", name); +		return -ENODEV; +	} + +	region = devm_request_mem_region(device, (*res)->start, +					 resource_size(*res), dev_name(device)); +	if (region == NULL) { +		dev_err(device, "unable to request %s\n", name); +		return -EBUSY; +	} + +	*ptr = devm_ioremap_nocache(device, region->start, +				    resource_size(region)); +	if (*ptr == NULL) { +		dev_err(device, "ioremap_nocache of %s failed!", name); +		return -ENOMEM; +	} + +	return 0; +} + +/* Probe Altera TSE MAC device + */ +static int altera_tse_probe(struct platform_device *pdev) +{ +	struct net_device *ndev; +	int ret = -ENODEV; +	struct resource *control_port; +	struct resource *dma_res; +	struct altera_tse_private *priv; +	const unsigned char *macaddr; +	struct device_node *np = pdev->dev.of_node; +	void __iomem *descmap; +	const struct of_device_id *of_id = NULL; + +	ndev = alloc_etherdev(sizeof(struct altera_tse_private)); +	if (!ndev) { +		dev_err(&pdev->dev, "Could not allocate network device\n"); +		return -ENODEV; +	} + +	SET_NETDEV_DEV(ndev, &pdev->dev); + +	priv = netdev_priv(ndev); +	priv->device = &pdev->dev; +	priv->dev = ndev; +	priv->msg_enable = netif_msg_init(debug, default_msg_level); + +	of_id = of_match_device(altera_tse_ids, &pdev->dev); + +	if (of_id) +		priv->dmaops = (struct altera_dmaops *)of_id->data; + + +	if (priv->dmaops && +	    priv->dmaops->altera_dtype == ALTERA_DTYPE_SGDMA) { +		/* Get the mapped address to the SGDMA descriptor memory */ +		ret = request_and_map(pdev, "s1", &dma_res, &descmap); +		if (ret) +			goto err_free_netdev; + +		/* Start of that memory is for transmit descriptors */ +		priv->tx_dma_desc = descmap; + +		/* First half is for tx descriptors, other half for tx */ +		priv->txdescmem = resource_size(dma_res)/2; + +		priv->txdescmem_busaddr = (dma_addr_t)dma_res->start; + +		priv->rx_dma_desc = (void __iomem *)((uintptr_t)(descmap + +						     priv->txdescmem)); +		priv->rxdescmem = resource_size(dma_res)/2; +		priv->rxdescmem_busaddr = dma_res->start; +		priv->rxdescmem_busaddr += priv->txdescmem; + +		if (upper_32_bits(priv->rxdescmem_busaddr)) { +			dev_dbg(priv->device, +				"SGDMA bus addresses greater than 32-bits\n"); +			goto err_free_netdev; +		} +		if (upper_32_bits(priv->txdescmem_busaddr)) 
{
+			dev_dbg(priv->device,
+				"SGDMA bus addresses greater than 32-bits\n");
+			goto err_free_netdev;
+		}
+	} else if (priv->dmaops &&
+		   priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) {
+		ret = request_and_map(pdev, "rx_resp", &dma_res,
+				      &priv->rx_dma_resp);
+		if (ret)
+			goto err_free_netdev;
+
+		ret = request_and_map(pdev, "tx_desc", &dma_res,
+				      &priv->tx_dma_desc);
+		if (ret)
+			goto err_free_netdev;
+
+		priv->txdescmem = resource_size(dma_res);
+		priv->txdescmem_busaddr = dma_res->start;
+
+		ret = request_and_map(pdev, "rx_desc", &dma_res,
+				      &priv->rx_dma_desc);
+		if (ret)
+			goto err_free_netdev;
+
+		priv->rxdescmem = resource_size(dma_res);
+		priv->rxdescmem_busaddr = dma_res->start;
+
+	} else {
+		goto err_free_netdev;
+	}
+
+	if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
+		dma_set_coherent_mask(priv->device,
+				      DMA_BIT_MASK(priv->dmaops->dmamask));
+	else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
+		dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
+	else
+		goto err_free_netdev;
+
+	/* MAC address space */
+	ret = request_and_map(pdev, "control_port", &control_port,
+			      (void __iomem **)&priv->mac_dev);
+	if (ret)
+		goto err_free_netdev;
+
+	/* xSGDMA Rx Dispatcher address space */
+	ret = request_and_map(pdev, "rx_csr", &dma_res,
+			      &priv->rx_dma_csr);
+	if (ret)
+		goto err_free_netdev;
+
+
+	/* xSGDMA Tx Dispatcher address space */
+	ret = request_and_map(pdev, "tx_csr", &dma_res,
+			      &priv->tx_dma_csr);
+	if (ret)
+		goto err_free_netdev;
+
+
+	/* Rx IRQ */
+	priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq");
+	if (priv->rx_irq == -ENXIO) {
+		dev_err(&pdev->dev, "cannot obtain Rx IRQ\n");
+		ret = -ENXIO;
+		goto err_free_netdev;
+	}
+
+	/* Tx IRQ */
+	priv->tx_irq = platform_get_irq_byname(pdev, "tx_irq");
+	if (priv->tx_irq == -ENXIO) {
+		dev_err(&pdev->dev, "cannot obtain Tx IRQ\n");
+		ret = -ENXIO;
+		goto err_free_netdev;
+	}
+
+	/* get FIFO depths from device tree */
+	if (of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
+				 &priv->rx_fifo_depth)) {
+		dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n");
+		ret = -ENXIO;
+		goto err_free_netdev;
+	}
+
+	if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
+				 &priv->tx_fifo_depth)) {
+		dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
+		ret = -ENXIO;
+		goto err_free_netdev;
+	}
+
+	/* get hash filter settings for this instance */
+	priv->hash_filter =
+		of_property_read_bool(pdev->dev.of_node,
+				      "altr,has-hash-multicast-filter");
+
+	/* Set hash filter to not set for now until the
+	 * multicast filter receive issue is debugged
+	 */
+	priv->hash_filter = 0;
+
+	/* get supplemental address settings for this instance */
+	priv->added_unicast =
+		of_property_read_bool(pdev->dev.of_node,
+				      "altr,has-supplementary-unicast");
+
+	/* Max MTU is 1500, ETH_DATA_LEN */
+	priv->max_mtu = ETH_DATA_LEN;
+
+	/* Get the max mtu from the device tree. Note that the
+	 * "max-frame-size" parameter is actually max mtu. Definition
+	 * in the ePAPR v1.1 spec and usage differ, so go with usage.
+	 */ +	of_property_read_u32(pdev->dev.of_node, "max-frame-size", +			     &priv->max_mtu); + +	/* The DMA buffer size already accounts for an alignment bias +	 * to avoid unaligned access exceptions for the NIOS processor, +	 */ +	priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE; + +	/* get default MAC address from device tree */ +	macaddr = of_get_mac_address(pdev->dev.of_node); +	if (macaddr) +		ether_addr_copy(ndev->dev_addr, macaddr); +	else +		eth_hw_addr_random(ndev); + +	priv->phy_iface = of_get_phy_mode(np); + +	/* try to get PHY address from device tree, use PHY autodetection if +	 * no valid address is given +	 */ +	if (of_property_read_u32(pdev->dev.of_node, "phy-addr", +				 &priv->phy_addr)) { +		priv->phy_addr = POLL_PHY; +	} + +	if (!((priv->phy_addr == POLL_PHY) || +	      ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) { +		dev_err(&pdev->dev, "invalid phy-addr specified %d\n", +			priv->phy_addr); +		goto err_free_netdev; +	} + +	/* Create/attach to MDIO bus */ +	ret = altera_tse_mdio_create(ndev, +				     atomic_add_return(1, &instance_count)); + +	if (ret) +		goto err_free_netdev; + +	/* initialize netdev */ +	ether_setup(ndev); +	ndev->mem_start = control_port->start; +	ndev->mem_end = control_port->end; +	ndev->netdev_ops = &altera_tse_netdev_ops; +	altera_tse_set_ethtool_ops(ndev); + +	altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode; + +	if (priv->hash_filter) +		altera_tse_netdev_ops.ndo_set_rx_mode = +			tse_set_rx_mode_hashfilter; + +	/* Scatter/gather IO is not supported, +	 * so it is turned off +	 */ +	ndev->hw_features &= ~NETIF_F_SG; +	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; + +	/* VLAN offloading of tagging, stripping and filtering is not +	 * supported by hardware, but driver will accommodate the +	 * extra 4-byte VLAN tag for processing by upper layers +	 */ +	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; + +	/* setup NAPI interface */ +	netif_napi_add(ndev, &priv->napi, tse_poll, NAPI_POLL_WEIGHT); + +	spin_lock_init(&priv->mac_cfg_lock); +	spin_lock_init(&priv->tx_lock); +	spin_lock_init(&priv->rxdma_irq_lock); + +	ret = register_netdev(ndev); +	if (ret) { +		dev_err(&pdev->dev, "failed to register TSE net device\n"); +		goto err_register_netdev; +	} + +	platform_set_drvdata(pdev, ndev); + +	priv->revision = ioread32(&priv->mac_dev->megacore_revision); + +	if (netif_msg_probe(priv)) +		dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n", +			 (priv->revision >> 8) & 0xff, +			 priv->revision & 0xff, +			 (unsigned long) control_port->start, priv->rx_irq, +			 priv->tx_irq); + +	ret = init_phy(ndev); +	if (ret != 0) { +		netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret); +		goto err_init_phy; +	} +	return 0; + +err_init_phy: +	unregister_netdev(ndev); +err_register_netdev: +	netif_napi_del(&priv->napi); +	altera_tse_mdio_destroy(ndev); +err_free_netdev: +	free_netdev(ndev); +	return ret; +} + +/* Remove Altera TSE MAC device + */ +static int altera_tse_remove(struct platform_device *pdev) +{ +	struct net_device *ndev = platform_get_drvdata(pdev); + +	platform_set_drvdata(pdev, NULL); +	altera_tse_mdio_destroy(ndev); +	unregister_netdev(ndev); +	free_netdev(ndev); + +	return 0; +} + +static const struct altera_dmaops altera_dtype_sgdma = { +	.altera_dtype = ALTERA_DTYPE_SGDMA, +	.dmamask = 32, +	.reset_dma = sgdma_reset, +	.enable_txirq = sgdma_enable_txirq, +	.enable_rxirq = sgdma_enable_rxirq, +	.disable_txirq = sgdma_disable_txirq, +	.disable_rxirq = sgdma_disable_rxirq, +	.clear_txirq = 
sgdma_clear_txirq, +	.clear_rxirq = sgdma_clear_rxirq, +	.tx_buffer = sgdma_tx_buffer, +	.tx_completions = sgdma_tx_completions, +	.add_rx_desc = sgdma_add_rx_desc, +	.get_rx_status = sgdma_rx_status, +	.init_dma = sgdma_initialize, +	.uninit_dma = sgdma_uninitialize, +	.start_rxdma = sgdma_start_rxdma, +}; + +static const struct altera_dmaops altera_dtype_msgdma = { +	.altera_dtype = ALTERA_DTYPE_MSGDMA, +	.dmamask = 64, +	.reset_dma = msgdma_reset, +	.enable_txirq = msgdma_enable_txirq, +	.enable_rxirq = msgdma_enable_rxirq, +	.disable_txirq = msgdma_disable_txirq, +	.disable_rxirq = msgdma_disable_rxirq, +	.clear_txirq = msgdma_clear_txirq, +	.clear_rxirq = msgdma_clear_rxirq, +	.tx_buffer = msgdma_tx_buffer, +	.tx_completions = msgdma_tx_completions, +	.add_rx_desc = msgdma_add_rx_desc, +	.get_rx_status = msgdma_rx_status, +	.init_dma = msgdma_initialize, +	.uninit_dma = msgdma_uninitialize, +	.start_rxdma = msgdma_start_rxdma, +}; + +static struct of_device_id altera_tse_ids[] = { +	{ .compatible = "altr,tse-msgdma-1.0", .data = &altera_dtype_msgdma, }, +	{ .compatible = "altr,tse-1.0", .data = &altera_dtype_sgdma, }, +	{ .compatible = "ALTR,tse-1.0", .data = &altera_dtype_sgdma, }, +	{}, +}; +MODULE_DEVICE_TABLE(of, altera_tse_ids); + +static struct platform_driver altera_tse_driver = { +	.probe		= altera_tse_probe, +	.remove		= altera_tse_remove, +	.suspend	= NULL, +	.resume		= NULL, +	.driver		= { +		.name	= ALTERA_TSE_RESOURCE_NAME, +		.owner	= THIS_MODULE, +		.of_match_table = altera_tse_ids, +	}, +}; + +module_platform_driver(altera_tse_driver); + +MODULE_AUTHOR("Altera Corporation"); +MODULE_DESCRIPTION("Altera Triple Speed Ethernet MAC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/altera/altera_utils.c b/drivers/net/ethernet/altera/altera_utils.c new file mode 100644 index 00000000000..d7eeb1713ad --- /dev/null +++ b/drivers/net/ethernet/altera/altera_utils.c @@ -0,0 +1,44 @@ +/* Altera TSE SGDMA and MSGDMA Linux driver + * Copyright (C) 2014 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program.  If not, see <http://www.gnu.org/licenses/>. + */ + +#include "altera_tse.h" +#include "altera_utils.h" + +void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask) +{ +	u32 value = csrrd32(ioaddr, offs); +	value |= bit_mask; +	csrwr32(value, ioaddr, offs); +} + +void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask) +{ +	u32 value = csrrd32(ioaddr, offs); +	value &= ~bit_mask; +	csrwr32(value, ioaddr, offs); +} + +int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask) +{ +	u32 value = csrrd32(ioaddr, offs); +	return (value & bit_mask) ? 1 : 0; +} + +int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask) +{ +	u32 value = csrrd32(ioaddr, offs); +	return (value & bit_mask) ? 
0 : 1; +} diff --git a/drivers/net/ethernet/altera/altera_utils.h b/drivers/net/ethernet/altera/altera_utils.h new file mode 100644 index 00000000000..baf100ccf58 --- /dev/null +++ b/drivers/net/ethernet/altera/altera_utils.h @@ -0,0 +1,27 @@ +/* Altera TSE SGDMA and MSGDMA Linux driver + * Copyright (C) 2014 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program.  If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/kernel.h> + +#ifndef __ALTERA_UTILS_H__ +#define __ALTERA_UTILS_H__ + +void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask); +void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask); +int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask); +int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask); + +#endif /* __ALTERA_UTILS_H__*/  | 
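
The 64-bin multicast hash written by altera_tse_set_mcfilter() above reduces each address octet to a single parity bit and shifts the six resulting bits together into an index into the hash_table CSR block. The short userspace sketch below reproduces that computation so the bin the driver would set for a given group address can be checked on the host; the helper name tse_hash_index() and the sample multicast address are illustrative and not part of the driver.

/* Standalone model of the hash computed in altera_tse_set_mcfilter().
 * Build with any C compiler, e.g. "cc hash.c && ./a.out".
 */
#include <stdio.h>

static unsigned int tse_hash_index(const unsigned char *addr)
{
	unsigned int hash = 0;
	int mac_octet;

	for (mac_octet = 5; mac_octet >= 0; mac_octet--) {
		unsigned char xor_bit = 0;
		unsigned char octet = addr[mac_octet];
		unsigned int bitshift;

		/* reduce the octet to its parity, exactly as the driver does */
		for (bitshift = 0; bitshift < 8; bitshift++)
			xor_bit ^= ((octet >> bitshift) & 0x01);

		hash = (hash << 1) | xor_bit;	/* one bit per octet, addr[5] first */
	}
	return hash;	/* 0..63: word index into the hash_table CSRs */
}

int main(void)
{
	/* 01:00:5e:00:00:01 is the all-hosts IPv4 multicast MAC */
	const unsigned char mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	printf("hash_table index = %u\n", tse_hash_index(mc));
	return 0;
}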

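
The transmit path relies on free-running producer/consumer counters: tse_tx_avail() reports priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1 free slots, tse_start_xmit() stops the queue once that drops to TXQUEUESTOP_THRESHHOLD or below, and tse_tx_complete() wakes it again only after more than TSE_TX_THRESH (a quarter of the ring) slots are free. Below is a minimal standalone model of that arithmetic, assuming the default 64-entry ring; the function names ring_avail() and main() are invented for illustration and do not appear in the driver.

/* Model of the TX ring stop/wake thresholds used in altera_tse_main.c. */
#include <stdio.h>

#define RING_SIZE		64
#define TXQUEUESTOP_THRESHHOLD	2		/* driver's stop threshold */
#define TX_THRESH		(RING_SIZE / 4)	/* driver's wake threshold */

static unsigned int ring_avail(unsigned int cons, unsigned int prod)
{
	return cons + RING_SIZE - prod - 1;	/* one slot is always kept unused */
}

int main(void)
{
	unsigned int prod = 0, cons = 0;

	/* queue packets until the driver would call netif_stop_queue() */
	while (ring_avail(cons, prod) > TXQUEUESTOP_THRESHHOLD)
		prod++;
	printf("stopped: %u packets queued, %u slots free\n",
	       prod, ring_avail(cons, prod));

	/* reclaim completions until the driver would wake the queue again */
	while (ring_avail(cons, prod) <= TX_THRESH)
		cons++;
	printf("woken:   %u completions reclaimed, %u slots free\n",
	       cons, ring_avail(cons, prod));
	return 0;
}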