Diffstat (limited to 'drivers/rapidio')
-rw-r--r--   drivers/rapidio/Kconfig                  43
-rw-r--r--   drivers/rapidio/Makefile                 10
-rw-r--r--   drivers/rapidio/devices/Kconfig          10
-rw-r--r--   drivers/rapidio/devices/Makefile          7
-rw-r--r--   drivers/rapidio/devices/tsi721.c       2518
-rw-r--r--   drivers/rapidio/devices/tsi721.h        857
-rw-r--r--   drivers/rapidio/devices/tsi721_dma.c    883
-rw-r--r--   drivers/rapidio/rio-driver.c             48
-rw-r--r--   drivers/rapidio/rio-scan.c              808
-rw-r--r--   drivers/rapidio/rio-sysfs.c             180
-rw-r--r--   drivers/rapidio/rio.c                   935
-rw-r--r--   drivers/rapidio/rio.h                    56
-rw-r--r--   drivers/rapidio/switches/Kconfig         19
-rw-r--r--   drivers/rapidio/switches/Makefile         5
-rw-r--r--   drivers/rapidio/switches/idt_gen2.c     196
-rw-r--r--   drivers/rapidio/switches/idtcps.c        98
-rw-r--r--   drivers/rapidio/switches/tsi500.c        78
-rw-r--r--   drivers/rapidio/switches/tsi568.c        84
-rw-r--r--   drivers/rapidio/switches/tsi57x.c       139
19 files changed, 6099 insertions, 875 deletions
diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig index 070211a5955..3e3be57e9a1 100644 --- a/drivers/rapidio/Kconfig +++ b/drivers/rapidio/Kconfig @@ -1,6 +1,8 @@  #  # RapidIO configuration  # +source "drivers/rapidio/devices/Kconfig" +  config RAPIDIO_DISC_TIMEOUT  	int "Discovery timeout duration (seconds)"  	depends on RAPIDIO @@ -20,7 +22,19 @@ config RAPIDIO_ENABLE_RX_TX_PORTS  	  ports for Input/Output direction to allow other traffic  	  than Maintenance transfers. -source "drivers/rapidio/switches/Kconfig" +config RAPIDIO_DMA_ENGINE +	bool "DMA Engine support for RapidIO" +	depends on RAPIDIO +	select DMADEVICES +	select DMA_ENGINE +	help +	  Say Y here if you want to use the DMA Engine framework for RapidIO data +	  transfers to/from target RIO devices. RapidIO uses NREAD and +	  NWRITE (NWRITE_R, SWRITE) requests to transfer data between local +	  memory and memory on a remote target device. You need a DMA controller +	  capable of performing data transfers to/from RapidIO. + +	  If you are unsure about this, say Y here.  config RAPIDIO_DEBUG  	bool "RapidIO subsystem debug messages" @@ -32,3 +46,30 @@ config RAPIDIO_DEBUG  	  going on.  	  If you are unsure about this, say N here. + +choice +	prompt "Enumeration method" +	depends on RAPIDIO +	default RAPIDIO_ENUM_BASIC +	help +	  There are different enumeration and discovery mechanisms offered +	  for the RapidIO subsystem. You may select a single built-in method +	  or any number of methods to be built as modules. +	  Selecting a built-in method disables the use of loadable methods. + +	  If unsure, select Basic built-in. + +config RAPIDIO_ENUM_BASIC +	tristate "Basic" +	help +	  This option includes the basic RapidIO fabric enumeration and discovery +	  mechanism similar to the one described in RapidIO specification Annex 1. + +endchoice + +menu "RapidIO Switch drivers" +	depends on RAPIDIO + +source "drivers/rapidio/switches/Kconfig" + +endmenu diff --git a/drivers/rapidio/Makefile b/drivers/rapidio/Makefile index b6139fe187b..6271ada6993 100644 --- a/drivers/rapidio/Makefile +++ b/drivers/rapidio/Makefile @@ -1,10 +1,12 @@  #  # Makefile for RapidIO interconnect services  # -obj-y += rio.o rio-access.o rio-driver.o rio-scan.o rio-sysfs.o +obj-$(CONFIG_RAPIDIO) += rapidio.o +rapidio-y := rio.o rio-access.o rio-driver.o rio-sysfs.o + +obj-$(CONFIG_RAPIDIO_ENUM_BASIC) += rio-scan.o  obj-$(CONFIG_RAPIDIO)		+= switches/ +obj-$(CONFIG_RAPIDIO)		+= devices/ -ifeq ($(CONFIG_RAPIDIO_DEBUG),y) -EXTRA_CFLAGS += -DDEBUG -endif +subdir-ccflags-$(CONFIG_RAPIDIO_DEBUG) := -DDEBUG diff --git a/drivers/rapidio/devices/Kconfig b/drivers/rapidio/devices/Kconfig new file mode 100644 index 00000000000..c4cb0877592 --- /dev/null +++ b/drivers/rapidio/devices/Kconfig @@ -0,0 +1,10 @@ +# +# RapidIO master port configuration +# + +config RAPIDIO_TSI721 +	tristate "IDT Tsi721 PCI Express SRIO Controller support" +	depends on RAPIDIO && PCIEPORTBUS +	default "n" +	---help--- +	  Include support for the IDT Tsi721 PCI Express Serial RapidIO controller. 
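For context, a minimal sketch of how a client might obtain a DMA channel for the RapidIO data transfers that RAPIDIO_DMA_ENGINE enables, using only the generic dmaengine API; the rio_demo_* names and the device-based matching rule are illustrative assumptions, not part of this patch:

	#include <linux/dmaengine.h>

	/* Assumption: a RapidIO-capable channel is identified here by its
	 * parent device; a real client would use its own matching rule.
	 */
	static bool rio_demo_chan_filter(struct dma_chan *chan, void *arg)
	{
		return chan->device->dev == (struct device *)arg;
	}

	static struct dma_chan *rio_demo_request_chan(struct device *mport_dev)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		/* Returns NULL when no matching channel is available */
		return dma_request_channel(mask, rio_demo_chan_filter, mport_dev);
	}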
diff --git a/drivers/rapidio/devices/Makefile b/drivers/rapidio/devices/Makefile new file mode 100644 index 00000000000..9432c494cf5 --- /dev/null +++ b/drivers/rapidio/devices/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for RapidIO devices +# + +obj-$(CONFIG_RAPIDIO_TSI721)	+= tsi721_mport.o +tsi721_mport-y			:= tsi721.o +tsi721_mport-$(CONFIG_RAPIDIO_DMA_ENGINE) += tsi721_dma.o diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c new file mode 100644 index 00000000000..2ca1a0b3ad5 --- /dev/null +++ b/drivers/rapidio/devices/tsi721.c @@ -0,0 +1,2518 @@ +/* + * RapidIO mport driver for Tsi721 PCIExpress-to-SRIO bridge + * + * Copyright 2011 Integrated Device Technology, Inc. + * Alexandre Bounine <alexandre.bounine@idt.com> + * Chul Kim <chul.kim@idt.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA  02111-1307, USA. + */ + +#include <linux/io.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/rio.h> +#include <linux/rio_drv.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/kfifo.h> +#include <linux/delay.h> + +#include "tsi721.h" + +#define DEBUG_PW	/* Inbound Port-Write debugging */ + +static void tsi721_omsg_handler(struct tsi721_device *priv, int ch); +static void tsi721_imsg_handler(struct tsi721_device *priv, int ch); + +/** + * tsi721_lcread - read from local SREP config space + * @mport: RapidIO master port info + * @index: ID of RapidIO interface + * @offset: Offset into configuration space + * @len: Length (in bytes) of the maintenance transaction + * @data: Location to be read into + * + * Generates a local SREP space read. Returns %0 on + * success or %-EINVAL on failure. + */ +static int tsi721_lcread(struct rio_mport *mport, int index, u32 offset, +			 int len, u32 *data) +{ +	struct tsi721_device *priv = mport->priv; + +	if (len != sizeof(u32)) +		return -EINVAL; /* only 32-bit access is supported */ + +	*data = ioread32(priv->regs + offset); + +	return 0; +} + +/** + * tsi721_lcwrite - write into local SREP config space + * @mport: RapidIO master port info + * @index: ID of RapidIO interface + * @offset: Offset into configuration space + * @len: Length (in bytes) of the maintenance transaction + * @data: Value to be written + * + * Generates a local write into SREP configuration space. Returns %0 on + * success or %-EINVAL on failure. 
+ */ +static int tsi721_lcwrite(struct rio_mport *mport, int index, u32 offset, +			  int len, u32 data) +{ +	struct tsi721_device *priv = mport->priv; + +	if (len != sizeof(u32)) +		return -EINVAL; /* only 32-bit access is supported */ + +	iowrite32(data, priv->regs + offset); + +	return 0; +} + +/** + * tsi721_maint_dma - Helper function to generate RapidIO maintenance + *                    transactions using designated Tsi721 DMA channel. + * @priv: pointer to tsi721 private data + * @sys_size: RapidIO transport system size + * @destid: Destination ID of transaction + * @hopcount: Number of hops to target device + * @offset: Offset into configuration space + * @len: Length (in bytes) of the maintenance transaction + * @data: Location to be read from or written into + * @do_wr: Operation flag (1 == MAINT_WR) + * + * Generates a RapidIO maintenance transaction (Read or Write). + * Returns %0 on success and %-EINVAL or %-EFAULT on failure. + */ +static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size, +			u16 destid, u8 hopcount, u32 offset, int len, +			u32 *data, int do_wr) +{ +	void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id); +	struct tsi721_dma_desc *bd_ptr; +	u32 rd_count, swr_ptr, ch_stat; +	int i, err = 0; +	u32 op = do_wr ? MAINT_WR : MAINT_RD; + +	if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32))) +		return -EINVAL; + +	bd_ptr = priv->mdma.bd_base; + +	rd_count = ioread32(regs + TSI721_DMAC_DRDCNT); + +	/* Initialize DMA descriptor */ +	bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid); +	bd_ptr[0].bcount = cpu_to_le32((sys_size << 26) | 0x04); +	bd_ptr[0].raddr_lo = cpu_to_le32((hopcount << 24) | offset); +	bd_ptr[0].raddr_hi = 0; +	if (do_wr) +		bd_ptr[0].data[0] = cpu_to_be32p(data); +	else +		bd_ptr[0].data[0] = 0xffffffff; + +	mb(); + +	/* Start DMA operation */ +	iowrite32(rd_count + 2,	regs + TSI721_DMAC_DWRCNT); +	ioread32(regs + TSI721_DMAC_DWRCNT); +	i = 0; + +	/* Wait until DMA transfer is finished */ +	while ((ch_stat = ioread32(regs + TSI721_DMAC_STS)) +							& TSI721_DMAC_STS_RUN) { +		udelay(1); +		if (++i >= 5000000) { +			dev_dbg(&priv->pdev->dev, +				"%s : DMA[%d] read timeout ch_status=%x\n", +				__func__, priv->mdma.ch_id, ch_stat); +			if (!do_wr) +				*data = 0xffffffff; +			err = -EIO; +			goto err_out; +		} +	} + +	if (ch_stat & TSI721_DMAC_STS_ABORT) { +		/* If DMA operation aborted due to error, +		 * reinitialize DMA channel +		 */ +		dev_dbg(&priv->pdev->dev, "%s : DMA ABORT ch_stat=%x\n", +			__func__, ch_stat); +		dev_dbg(&priv->pdev->dev, "OP=%d : destid=%x hc=%x off=%x\n", +			do_wr ? MAINT_WR : MAINT_RD, destid, hopcount, offset); +		iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT); +		iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL); +		udelay(10); +		iowrite32(0, regs + TSI721_DMAC_DWRCNT); +		udelay(1); +		if (!do_wr) +			*data = 0xffffffff; +		err = -EIO; +		goto err_out; +	} + +	if (!do_wr) +		*data = be32_to_cpu(bd_ptr[0].data[0]); + +	/* +	 * Update descriptor status FIFO RD pointer. +	 * NOTE: Skipping check and clear of FIFO entries because we are +	 * waiting for the transfer to complete. +	 */ +	swr_ptr = ioread32(regs + TSI721_DMAC_DSWP); +	iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP); +err_out: + +	return err; +} + +/** + * tsi721_cread_dma - Generate a RapidIO maintenance read transaction + *                    using Tsi721 BDMA engine. 
+ * @mport: RapidIO master port control structure + * @index: ID of RapidIO interface + * @destid: Destination ID of transaction + * @hopcount: Number of hops to target device + * @offset: Offset into configuration space + * @len: Length (in bytes) of the maintenance transaction + * @data: Location to be read into + * + * Generates a RapidIO maintenance read transaction. + * Returns %0 on success and %-EINVAL or %-EFAULT on failure. + */ +static int tsi721_cread_dma(struct rio_mport *mport, int index, u16 destid, +			u8 hopcount, u32 offset, int len, u32 *data) +{ +	struct tsi721_device *priv = mport->priv; + +	return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount, +				offset, len, data, 0); +} + +/** + * tsi721_cwrite_dma - Generate a RapidIO maintenance write transaction + *                     using Tsi721 BDMA engine + * @mport: RapidIO master port control structure + * @index: ID of RapidIO interface + * @destid: Destination ID of transaction + * @hopcount: Number of hops to target device + * @offset: Offset into configuration space + * @len: Length (in bytes) of the maintenance transaction + * @data: Value to be written + * + * Generates a RapidIO maintenance write transaction. + * Returns %0 on success and %-EINVAL or %-EFAULT on failure. + */ +static int tsi721_cwrite_dma(struct rio_mport *mport, int index, u16 destid, +			 u8 hopcount, u32 offset, int len, u32 data) +{ +	struct tsi721_device *priv = mport->priv; +	u32 temp = data; + +	return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount, +				offset, len, &temp, 1); +} + +/** + * tsi721_pw_handler - Tsi721 inbound port-write interrupt handler + * @mport: RapidIO master port structure + * + * Handles inbound port-write interrupts. Copies a PW message from an internal + * buffer into the PW message FIFO and schedules a deferred routine to process + * queued messages. + */ +static int +tsi721_pw_handler(struct rio_mport *mport) +{ +	struct tsi721_device *priv = mport->priv; +	u32 pw_stat; +	u32 pw_buf[TSI721_RIO_PW_MSG_SIZE/sizeof(u32)]; + + +	pw_stat = ioread32(priv->regs + TSI721_RIO_PW_RX_STAT); + +	if (pw_stat & TSI721_RIO_PW_RX_STAT_PW_VAL) { +		pw_buf[0] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(0)); +		pw_buf[1] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(1)); +		pw_buf[2] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(2)); +		pw_buf[3] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(3)); + +		/* Queue PW message (if there is room in FIFO), +		 * otherwise discard it. 
+		 */ +		spin_lock(&priv->pw_fifo_lock); +		if (kfifo_avail(&priv->pw_fifo) >= TSI721_RIO_PW_MSG_SIZE) +			kfifo_in(&priv->pw_fifo, pw_buf, +						TSI721_RIO_PW_MSG_SIZE); +		else +			priv->pw_discard_count++; +		spin_unlock(&priv->pw_fifo_lock); +	} + +	/* Clear pending PW interrupts */ +	iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL, +		  priv->regs + TSI721_RIO_PW_RX_STAT); + +	schedule_work(&priv->pw_work); + +	return 0; +} + +static void tsi721_pw_dpc(struct work_struct *work) +{ +	struct tsi721_device *priv = container_of(work, struct tsi721_device, +						    pw_work); +	u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)]; /* Use full size PW message +							buffer for RIO layer */ + +	/* +	 * Process port-write messages +	 */ +	while (kfifo_out_spinlocked(&priv->pw_fifo, (unsigned char *)msg_buffer, +			 TSI721_RIO_PW_MSG_SIZE, &priv->pw_fifo_lock)) { +		/* Process one message */ +#ifdef DEBUG_PW +		{ +		u32 i; +		pr_debug("%s : Port-Write Message:", __func__); +		for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); ) { +			pr_debug("0x%02x: %08x %08x %08x %08x", i*4, +				msg_buffer[i], msg_buffer[i + 1], +				msg_buffer[i + 2], msg_buffer[i + 3]); +			i += 4; +		} +		pr_debug("\n"); +		} +#endif +		/* Pass the port-write message to RIO core for processing */ +		rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer); +	} +} + +/** + * tsi721_pw_enable - enable/disable port-write interface + * @mport: Master port implementing the port write unit + * @enable:    1=enable; 0=disable port-write message handling + */ +static int tsi721_pw_enable(struct rio_mport *mport, int enable) +{ +	struct tsi721_device *priv = mport->priv; +	u32 rval; + +	rval = ioread32(priv->regs + TSI721_RIO_EM_INT_ENABLE); + +	if (enable) +		rval |= TSI721_RIO_EM_INT_ENABLE_PW_RX; +	else +		rval &= ~TSI721_RIO_EM_INT_ENABLE_PW_RX; + +	/* Clear pending PW interrupts */ +	iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL, +		  priv->regs + TSI721_RIO_PW_RX_STAT); +	/* Update enable bits */ +	iowrite32(rval, priv->regs + TSI721_RIO_EM_INT_ENABLE); + +	return 0; +} + +/** + * tsi721_dsend - Send a RapidIO doorbell + * @mport: RapidIO master port info + * @index: ID of RapidIO interface + * @destid: Destination ID of target device + * @data: 16-bit info field of RapidIO doorbell + * + * Sends a RapidIO doorbell message. Always returns %0. + */ +static int tsi721_dsend(struct rio_mport *mport, int index, +			u16 destid, u16 data) +{ +	struct tsi721_device *priv = mport->priv; +	u32 offset; + +	offset = (((mport->sys_size) ? RIO_TT_CODE_16 : RIO_TT_CODE_8) << 18) | +		 (destid << 2); + +	dev_dbg(&priv->pdev->dev, +		"Send Doorbell 0x%04x to destID 0x%x\n", data, destid); +	iowrite16be(data, priv->odb_base + offset); + +	return 0; +} + +/** + * tsi721_dbell_handler - Tsi721 doorbell interrupt handler + * @mport: RapidIO master port structure + * + * Handles inbound doorbell interrupts. Copies a doorbell entry from an internal + * buffer into the DB message FIFO and schedules a deferred routine to process + * queued DBs. 
+ */ +static int +tsi721_dbell_handler(struct rio_mport *mport) +{ +	struct tsi721_device *priv = mport->priv; +	u32 regval; + +	/* Disable IDB interrupts */ +	regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); +	regval &= ~TSI721_SR_CHINT_IDBQRCV; +	iowrite32(regval, +		priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); + +	schedule_work(&priv->idb_work); + +	return 0; +} + +static void tsi721_db_dpc(struct work_struct *work) +{ +	struct tsi721_device *priv = container_of(work, struct tsi721_device, +						    idb_work); +	struct rio_mport *mport; +	struct rio_dbell *dbell; +	int found = 0; +	u32 wr_ptr, rd_ptr; +	u64 *idb_entry; +	u32 regval; +	union { +		u64 msg; +		u8  bytes[8]; +	} idb; + +	/* +	 * Process queued inbound doorbells +	 */ +	mport = priv->mport; + +	wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE; +	rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE)) % IDB_QSIZE; + +	while (wr_ptr != rd_ptr) { +		idb_entry = (u64 *)(priv->idb_base + +					(TSI721_IDB_ENTRY_SIZE * rd_ptr)); +		rd_ptr++; +		rd_ptr %= IDB_QSIZE; +		idb.msg = *idb_entry; +		*idb_entry = 0; + +		/* Process one doorbell */ +		list_for_each_entry(dbell, &mport->dbells, node) { +			if ((dbell->res->start <= DBELL_INF(idb.bytes)) && +			    (dbell->res->end >= DBELL_INF(idb.bytes))) { +				found = 1; +				break; +			} +		} + +		if (found) { +			dbell->dinb(mport, dbell->dev_id, DBELL_SID(idb.bytes), +				    DBELL_TID(idb.bytes), DBELL_INF(idb.bytes)); +		} else { +			dev_dbg(&priv->pdev->dev, +				"spurious inb doorbell, sid %2.2x tid %2.2x" +				" info %4.4x\n", DBELL_SID(idb.bytes), +				DBELL_TID(idb.bytes), DBELL_INF(idb.bytes)); +		} + +		wr_ptr = ioread32(priv->regs + +				  TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE; +	} + +	iowrite32(rd_ptr & (IDB_QSIZE - 1), +		priv->regs + TSI721_IDQ_RP(IDB_QUEUE)); + +	/* Re-enable IDB interrupts */ +	regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); +	regval |= TSI721_SR_CHINT_IDBQRCV; +	iowrite32(regval, +		priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); + +	wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE; +	if (wr_ptr != rd_ptr) +		schedule_work(&priv->idb_work); +} + +/** + * tsi721_irqhandler - Tsi721 interrupt handler + * @irq: Linux interrupt number + * @ptr: Pointer to interrupt-specific data (mport structure) + * + * Handles Tsi721 interrupts signaled using MSI and INTA. Checks reported + * interrupt events and calls an event-specific handler(s). 
+ */ +static irqreturn_t tsi721_irqhandler(int irq, void *ptr) +{ +	struct rio_mport *mport = (struct rio_mport *)ptr; +	struct tsi721_device *priv = mport->priv; +	u32 dev_int; +	u32 dev_ch_int; +	u32 intval; +	u32 ch_inte; + +	/* For MSI mode disable all device-level interrupts */ +	if (priv->flags & TSI721_USING_MSI) +		iowrite32(0, priv->regs + TSI721_DEV_INTE); + +	dev_int = ioread32(priv->regs + TSI721_DEV_INT); +	if (!dev_int) +		return IRQ_NONE; + +	dev_ch_int = ioread32(priv->regs + TSI721_DEV_CHAN_INT); + +	if (dev_int & TSI721_DEV_INT_SR2PC_CH) { +		/* Service SR2PC Channel interrupts */ +		if (dev_ch_int & TSI721_INT_SR2PC_CHAN(IDB_QUEUE)) { +			/* Service Inbound Doorbell interrupt */ +			intval = ioread32(priv->regs + +						TSI721_SR_CHINT(IDB_QUEUE)); +			if (intval & TSI721_SR_CHINT_IDBQRCV) +				tsi721_dbell_handler(mport); +			else +				dev_info(&priv->pdev->dev, +					"Unsupported SR_CH_INT %x\n", intval); + +			/* Clear interrupts */ +			iowrite32(intval, +				priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); +			ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); +		} +	} + +	if (dev_int & TSI721_DEV_INT_SMSG_CH) { +		int ch; + +		/* +		 * Service channel interrupts from Messaging Engine +		 */ + +		if (dev_ch_int & TSI721_INT_IMSG_CHAN_M) { /* Inbound Msg */ +			/* Disable signaled IB MSG Channel interrupts */ +			ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); +			ch_inte &= ~(dev_ch_int & TSI721_INT_IMSG_CHAN_M); +			iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE); + +			/* +			 * Process Inbound Message interrupt for each MBOX +			 */ +			for (ch = 4; ch < RIO_MAX_MBOX + 4; ch++) { +				if (!(dev_ch_int & TSI721_INT_IMSG_CHAN(ch))) +					continue; +				tsi721_imsg_handler(priv, ch); +			} +		} + +		if (dev_ch_int & TSI721_INT_OMSG_CHAN_M) { /* Outbound Msg */ +			/* Disable signaled OB MSG Channel interrupts */ +			ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); +			ch_inte &= ~(dev_ch_int & TSI721_INT_OMSG_CHAN_M); +			iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE); + +			/* +			 * Process Outbound Message interrupts for each MBOX +			 */ + +			for (ch = 0; ch < RIO_MAX_MBOX; ch++) { +				if (!(dev_ch_int & TSI721_INT_OMSG_CHAN(ch))) +					continue; +				tsi721_omsg_handler(priv, ch); +			} +		} +	} + +	if (dev_int & TSI721_DEV_INT_SRIO) { +		/* Service SRIO MAC interrupts */ +		intval = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT); +		if (intval & TSI721_RIO_EM_INT_STAT_PW_RX) +			tsi721_pw_handler(mport); +	} + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE +	if (dev_int & TSI721_DEV_INT_BDMA_CH) { +		int ch; + +		if (dev_ch_int & TSI721_INT_BDMA_CHAN_M) { +			dev_dbg(&priv->pdev->dev, +				"IRQ from DMA channel 0x%08x\n", dev_ch_int); + +			for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) { +				if (!(dev_ch_int & TSI721_INT_BDMA_CHAN(ch))) +					continue; +				tsi721_bdma_handler(&priv->bdma[ch]); +			} +		} +	} +#endif + +	/* For MSI mode re-enable device-level interrupts */ +	if (priv->flags & TSI721_USING_MSI) { +		dev_int = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO | +			TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH; +		iowrite32(dev_int, priv->regs + TSI721_DEV_INTE); +	} + +	return IRQ_HANDLED; +} + +static void tsi721_interrupts_init(struct tsi721_device *priv) +{ +	u32 intr; + +	/* Enable IDB interrupts */ +	iowrite32(TSI721_SR_CHINT_ALL, +		priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); +	iowrite32(TSI721_SR_CHINT_IDBQRCV, +		priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); + +	/* Enable SRIO MAC interrupts */ +	iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT, +		
priv->regs + TSI721_RIO_EM_DEV_INT_EN); + +	/* Enable interrupts from channels in use */ +#ifdef CONFIG_RAPIDIO_DMA_ENGINE +	intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE) | +		(TSI721_INT_BDMA_CHAN_M & +		 ~TSI721_INT_BDMA_CHAN(TSI721_DMACH_MAINT)); +#else +	intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE); +#endif +	iowrite32(intr,	priv->regs + TSI721_DEV_CHAN_INTE); + +	if (priv->flags & TSI721_USING_MSIX) +		intr = TSI721_DEV_INT_SRIO; +	else +		intr = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO | +			TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH; + +	iowrite32(intr, priv->regs + TSI721_DEV_INTE); +	ioread32(priv->regs + TSI721_DEV_INTE); +} + +#ifdef CONFIG_PCI_MSI +/** + * tsi721_omsg_msix - MSI-X interrupt handler for outbound messaging + * @irq: Linux interrupt number + * @ptr: Pointer to interrupt-specific data (mport structure) + * + * Handles outbound messaging interrupts signaled using MSI-X. + */ +static irqreturn_t tsi721_omsg_msix(int irq, void *ptr) +{ +	struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv; +	int mbox; + +	mbox = (irq - priv->msix[TSI721_VECT_OMB0_DONE].vector) % RIO_MAX_MBOX; +	tsi721_omsg_handler(priv, mbox); +	return IRQ_HANDLED; +} + +/** + * tsi721_imsg_msix - MSI-X interrupt handler for inbound messaging + * @irq: Linux interrupt number + * @ptr: Pointer to interrupt-specific data (mport structure) + * + * Handles inbound messaging interrupts signaled using MSI-X. + */ +static irqreturn_t tsi721_imsg_msix(int irq, void *ptr) +{ +	struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv; +	int mbox; + +	mbox = (irq - priv->msix[TSI721_VECT_IMB0_RCV].vector) % RIO_MAX_MBOX; +	tsi721_imsg_handler(priv, mbox + 4); +	return IRQ_HANDLED; +} + +/** + * tsi721_srio_msix - Tsi721 MSI-X SRIO MAC interrupt handler + * @irq: Linux interrupt number + * @ptr: Pointer to interrupt-specific data (mport structure) + * + * Handles Tsi721 interrupts from SRIO MAC. + */ +static irqreturn_t tsi721_srio_msix(int irq, void *ptr) +{ +	struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv; +	u32 srio_int; + +	/* Service SRIO MAC interrupts */ +	srio_int = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT); +	if (srio_int & TSI721_RIO_EM_INT_STAT_PW_RX) +		tsi721_pw_handler((struct rio_mport *)ptr); + +	return IRQ_HANDLED; +} + +/** + * tsi721_sr2pc_ch_msix - Tsi721 MSI-X SR2PC Channel interrupt handler + * @irq: Linux interrupt number + * @ptr: Pointer to interrupt-specific data (mport structure) + * + * Handles Tsi721 interrupts from SR2PC Channel. + * NOTE: At this moment services only one SR2PC channel associated with inbound + * doorbells. + */ +static irqreturn_t tsi721_sr2pc_ch_msix(int irq, void *ptr) +{ +	struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv; +	u32 sr_ch_int; + +	/* Service Inbound DB interrupt from SR2PC channel */ +	sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); +	if (sr_ch_int & TSI721_SR_CHINT_IDBQRCV) +		tsi721_dbell_handler((struct rio_mport *)ptr); + +	/* Clear interrupts */ +	iowrite32(sr_ch_int, priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); +	/* Read back to ensure that interrupt was cleared */ +	sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); + +	return IRQ_HANDLED; +} + +/** + * tsi721_request_msix - register interrupt service for MSI-X mode. + * @mport: RapidIO master port structure + * + * Registers MSI-X interrupt service routines for interrupts that are active + * immediately after mport initialization. Messaging interrupt service routines + * should be registered during corresponding open requests. 
+ */ +static int tsi721_request_msix(struct rio_mport *mport) +{ +	struct tsi721_device *priv = mport->priv; +	int err = 0; + +	err = request_irq(priv->msix[TSI721_VECT_IDB].vector, +			tsi721_sr2pc_ch_msix, 0, +			priv->msix[TSI721_VECT_IDB].irq_name, (void *)mport); +	if (err) +		goto out; + +	err = request_irq(priv->msix[TSI721_VECT_PWRX].vector, +			tsi721_srio_msix, 0, +			priv->msix[TSI721_VECT_PWRX].irq_name, (void *)mport); +	if (err) +		free_irq( +			priv->msix[TSI721_VECT_IDB].vector, +			(void *)mport); +out: +	return err; +} + +/** + * tsi721_enable_msix - Attempts to enable MSI-X support for Tsi721. + * @priv: pointer to tsi721 private data + * + * Configures MSI-X support for Tsi721. Supports only an exact number + * of requested vectors. + */ +static int tsi721_enable_msix(struct tsi721_device *priv) +{ +	struct msix_entry entries[TSI721_VECT_MAX]; +	int err; +	int i; + +	entries[TSI721_VECT_IDB].entry = TSI721_MSIX_SR2PC_IDBQ_RCV(IDB_QUEUE); +	entries[TSI721_VECT_PWRX].entry = TSI721_MSIX_SRIO_MAC_INT; + +	/* +	 * Initialize MSI-X entries for Messaging Engine: +	 * this driver supports four RIO mailboxes (inbound and outbound) +	 * NOTE: Inbound message MBOX 0...4 use IB channels 4...7. Therefore +	 * offset +4 is added to IB MBOX number. +	 */ +	for (i = 0; i < RIO_MAX_MBOX; i++) { +		entries[TSI721_VECT_IMB0_RCV + i].entry = +					TSI721_MSIX_IMSG_DQ_RCV(i + 4); +		entries[TSI721_VECT_IMB0_INT + i].entry = +					TSI721_MSIX_IMSG_INT(i + 4); +		entries[TSI721_VECT_OMB0_DONE + i].entry = +					TSI721_MSIX_OMSG_DONE(i); +		entries[TSI721_VECT_OMB0_INT + i].entry = +					TSI721_MSIX_OMSG_INT(i); +	} + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE +	/* +	 * Initialize MSI-X entries for Block DMA Engine: +	 * this driver supports XXX DMA channels +	 * (one is reserved for SRIO maintenance transactions) +	 */ +	for (i = 0; i < TSI721_DMA_CHNUM; i++) { +		entries[TSI721_VECT_DMA0_DONE + i].entry = +					TSI721_MSIX_DMACH_DONE(i); +		entries[TSI721_VECT_DMA0_INT + i].entry = +					TSI721_MSIX_DMACH_INT(i); +	} +#endif /* CONFIG_RAPIDIO_DMA_ENGINE */ + +	err = pci_enable_msix_exact(priv->pdev, entries, ARRAY_SIZE(entries)); +	if (err) { +		dev_err(&priv->pdev->dev, +			"Failed to enable MSI-X (err=%d)\n", err); +		return err; +	} + +	/* +	 * Copy MSI-X vector information into tsi721 private structure +	 */ +	priv->msix[TSI721_VECT_IDB].vector = entries[TSI721_VECT_IDB].vector; +	snprintf(priv->msix[TSI721_VECT_IDB].irq_name, IRQ_DEVICE_NAME_MAX, +		 DRV_NAME "-idb@pci:%s", pci_name(priv->pdev)); +	priv->msix[TSI721_VECT_PWRX].vector = entries[TSI721_VECT_PWRX].vector; +	snprintf(priv->msix[TSI721_VECT_PWRX].irq_name, IRQ_DEVICE_NAME_MAX, +		 DRV_NAME "-pwrx@pci:%s", pci_name(priv->pdev)); + +	for (i = 0; i < RIO_MAX_MBOX; i++) { +		priv->msix[TSI721_VECT_IMB0_RCV + i].vector = +				entries[TSI721_VECT_IMB0_RCV + i].vector; +		snprintf(priv->msix[TSI721_VECT_IMB0_RCV + i].irq_name, +			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbr%d@pci:%s", +			 i, pci_name(priv->pdev)); + +		priv->msix[TSI721_VECT_IMB0_INT + i].vector = +				entries[TSI721_VECT_IMB0_INT + i].vector; +		snprintf(priv->msix[TSI721_VECT_IMB0_INT + i].irq_name, +			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbi%d@pci:%s", +			 i, pci_name(priv->pdev)); + +		priv->msix[TSI721_VECT_OMB0_DONE + i].vector = +				entries[TSI721_VECT_OMB0_DONE + i].vector; +		snprintf(priv->msix[TSI721_VECT_OMB0_DONE + i].irq_name, +			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombd%d@pci:%s", +			 i, pci_name(priv->pdev)); + +		priv->msix[TSI721_VECT_OMB0_INT + i].vector = +				
entries[TSI721_VECT_OMB0_INT + i].vector; +		snprintf(priv->msix[TSI721_VECT_OMB0_INT + i].irq_name, +			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombi%d@pci:%s", +			 i, pci_name(priv->pdev)); +	} + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE +	for (i = 0; i < TSI721_DMA_CHNUM; i++) { +		priv->msix[TSI721_VECT_DMA0_DONE + i].vector = +				entries[TSI721_VECT_DMA0_DONE + i].vector; +		snprintf(priv->msix[TSI721_VECT_DMA0_DONE + i].irq_name, +			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmad%d@pci:%s", +			 i, pci_name(priv->pdev)); + +		priv->msix[TSI721_VECT_DMA0_INT + i].vector = +				entries[TSI721_VECT_DMA0_INT + i].vector; +		snprintf(priv->msix[TSI721_VECT_DMA0_INT + i].irq_name, +			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmai%d@pci:%s", +			 i, pci_name(priv->pdev)); +	} +#endif /* CONFIG_RAPIDIO_DMA_ENGINE */ + +	return 0; +} +#endif /* CONFIG_PCI_MSI */ + +static int tsi721_request_irq(struct rio_mport *mport) +{ +	struct tsi721_device *priv = mport->priv; +	int err; + +#ifdef CONFIG_PCI_MSI +	if (priv->flags & TSI721_USING_MSIX) +		err = tsi721_request_msix(mport); +	else +#endif +		err = request_irq(priv->pdev->irq, tsi721_irqhandler, +			  (priv->flags & TSI721_USING_MSI) ? 0 : IRQF_SHARED, +			  DRV_NAME, (void *)mport); + +	if (err) +		dev_err(&priv->pdev->dev, +			"Unable to allocate interrupt, Error: %d\n", err); + +	return err; +} + +/** + * tsi721_init_pc2sr_mapping - initializes outbound (PCIe->SRIO) + * translation regions. + * @priv: pointer to tsi721 private data + * + * Disables SREP translation regions. + */ +static void tsi721_init_pc2sr_mapping(struct tsi721_device *priv) +{ +	int i; + +	/* Disable all PC2SR translation windows */ +	for (i = 0; i < TSI721_OBWIN_NUM; i++) +		iowrite32(0, priv->regs + TSI721_OBWINLB(i)); +} + +/** + * tsi721_rio_map_inb_mem -- Mapping inbound memory region. + * @mport: RapidIO master port + * @lstart: Local memory space start address. + * @rstart: RapidIO space start address. + * @size: The mapping region size. + * @flags: Flags for mapping. 0 for using default flags. + * + * Return: 0 -- Success. + * + * This function will create the inbound mapping + * from rstart to lstart. + */ +static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart, +		u64 rstart, u32 size, u32 flags) +{ +	struct tsi721_device *priv = mport->priv; +	int i; +	u32 regval; + +	if (!is_power_of_2(size) || size < 0x1000 || +	    ((u64)lstart & (size - 1)) || (rstart & (size - 1))) +		return -EINVAL; + +	/* Search for free inbound translation window */ +	for (i = 0; i < TSI721_IBWIN_NUM; i++) { +		regval = ioread32(priv->regs + TSI721_IBWIN_LB(i)); +		if (!(regval & TSI721_IBWIN_LB_WEN)) +			break; +	} + +	if (i >= TSI721_IBWIN_NUM) { +		dev_err(&priv->pdev->dev, +			"Unable to find free inbound window\n"); +		return -EBUSY; +	} + +	iowrite32(TSI721_IBWIN_SIZE(size) << 8, +			priv->regs + TSI721_IBWIN_SZ(i)); + +	iowrite32(((u64)lstart >> 32), priv->regs + TSI721_IBWIN_TUA(i)); +	iowrite32(((u64)lstart & TSI721_IBWIN_TLA_ADD), +		  priv->regs + TSI721_IBWIN_TLA(i)); + +	iowrite32(rstart >> 32, priv->regs + TSI721_IBWIN_UB(i)); +	iowrite32((rstart & TSI721_IBWIN_LB_BA) | TSI721_IBWIN_LB_WEN, +		priv->regs + TSI721_IBWIN_LB(i)); +	dev_dbg(&priv->pdev->dev, +		"Configured IBWIN%d mapping (RIO_0x%llx -> PCIe_0x%llx)\n", +		i, rstart, (unsigned long long)lstart); + +	return 0; +} + +/** + * tsi721_rio_unmap_inb_mem -- Unmapping inbound memory region. + * @mport: RapidIO master port + * @lstart: Local memory space start address. 
+ */ +static void tsi721_rio_unmap_inb_mem(struct rio_mport *mport, +				dma_addr_t lstart) +{ +	struct tsi721_device *priv = mport->priv; +	int i; +	u64 addr; +	u32 regval; + +	/* Search for matching active inbound translation window */ +	for (i = 0; i < TSI721_IBWIN_NUM; i++) { +		regval = ioread32(priv->regs + TSI721_IBWIN_LB(i)); +		if (regval & TSI721_IBWIN_LB_WEN) { +			regval = ioread32(priv->regs + TSI721_IBWIN_TUA(i)); +			addr = (u64)regval << 32; +			regval = ioread32(priv->regs + TSI721_IBWIN_TLA(i)); +			addr |= regval & TSI721_IBWIN_TLA_ADD; + +			if (addr == (u64)lstart) { +				iowrite32(0, priv->regs + TSI721_IBWIN_LB(i)); +				break; +			} +		} +	} +} + +/** + * tsi721_init_sr2pc_mapping - initializes inbound (SRIO->PCIe) + * translation regions. + * @priv: pointer to tsi721 private data + * + * Disables inbound windows. + */ +static void tsi721_init_sr2pc_mapping(struct tsi721_device *priv) +{ +	int i; + +	/* Disable all SR2PC inbound windows */ +	for (i = 0; i < TSI721_IBWIN_NUM; i++) +		iowrite32(0, priv->regs + TSI721_IBWIN_LB(i)); +} + +/** + * tsi721_port_write_init - Inbound port write interface init + * @priv: pointer to tsi721 private data + * + * Initializes inbound port write handler. + * Returns %0 on success or %-ENOMEM on failure. + */ +static int tsi721_port_write_init(struct tsi721_device *priv) +{ +	priv->pw_discard_count = 0; +	INIT_WORK(&priv->pw_work, tsi721_pw_dpc); +	spin_lock_init(&priv->pw_fifo_lock); +	if (kfifo_alloc(&priv->pw_fifo, +			TSI721_RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) { +		dev_err(&priv->pdev->dev, "PW FIFO allocation failed\n"); +		return -ENOMEM; +	} + +	/* Use reliable port-write capture mode */ +	iowrite32(TSI721_RIO_PW_CTL_PWC_REL, priv->regs + TSI721_RIO_PW_CTL); +	return 0; +} + +static int tsi721_doorbell_init(struct tsi721_device *priv) +{ +	/* Outbound Doorbells do not require any setup. +	 * Tsi721 uses dedicated PCI BAR1 to generate doorbells. +	 * That BAR1 was mapped during the probe routine. +	 */ + +	/* Initialize Inbound Doorbell processing DPC and queue */ +	priv->db_discard_count = 0; +	INIT_WORK(&priv->idb_work, tsi721_db_dpc); + +	/* Allocate buffer for inbound doorbells queue */ +	priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev, +				IDB_QSIZE * TSI721_IDB_ENTRY_SIZE, +				&priv->idb_dma, GFP_KERNEL); +	if (!priv->idb_base) +		return -ENOMEM; + +	dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n", +		priv->idb_base, (unsigned long long)priv->idb_dma); + +	iowrite32(TSI721_IDQ_SIZE_VAL(IDB_QSIZE), +		priv->regs + TSI721_IDQ_SIZE(IDB_QUEUE)); +	iowrite32(((u64)priv->idb_dma >> 32), +		priv->regs + TSI721_IDQ_BASEU(IDB_QUEUE)); +	iowrite32(((u64)priv->idb_dma & TSI721_IDQ_BASEL_ADDR), +		priv->regs + TSI721_IDQ_BASEL(IDB_QUEUE)); +	/* Enable accepting all inbound doorbells */ +	iowrite32(0, priv->regs + TSI721_IDQ_MASK(IDB_QUEUE)); + +	iowrite32(TSI721_IDQ_INIT, priv->regs + TSI721_IDQ_CTL(IDB_QUEUE)); + +	iowrite32(0, priv->regs + TSI721_IDQ_RP(IDB_QUEUE)); + +	return 0; +} + +static void tsi721_doorbell_free(struct tsi721_device *priv) +{ +	if (priv->idb_base == NULL) +		return; + +	/* Free buffer allocated for inbound doorbell queue */ +	dma_free_coherent(&priv->pdev->dev, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE, +			  priv->idb_base, priv->idb_dma); +	priv->idb_base = NULL; +} + +/** + * tsi721_bdma_maint_init - Initialize maintenance request BDMA channel. 
+ * @priv: pointer to tsi721 private data + * + * Initialize BDMA channel allocated for RapidIO maintenance read/write + * request generation + * Returns %0 on success or %-ENOMEM on failure. + */ +static int tsi721_bdma_maint_init(struct tsi721_device *priv) +{ +	struct tsi721_dma_desc *bd_ptr; +	u64		*sts_ptr; +	dma_addr_t	bd_phys, sts_phys; +	int		sts_size; +	int		bd_num = 2; +	void __iomem	*regs; + +	dev_dbg(&priv->pdev->dev, +		"Init Block DMA Engine for Maintenance requests, CH%d\n", +		TSI721_DMACH_MAINT); + +	/* +	 * Initialize DMA channel for maintenance requests +	 */ + +	priv->mdma.ch_id = TSI721_DMACH_MAINT; +	regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT); + +	/* Allocate space for DMA descriptors */ +	bd_ptr = dma_zalloc_coherent(&priv->pdev->dev, +					bd_num * sizeof(struct tsi721_dma_desc), +					&bd_phys, GFP_KERNEL); +	if (!bd_ptr) +		return -ENOMEM; + +	priv->mdma.bd_num = bd_num; +	priv->mdma.bd_phys = bd_phys; +	priv->mdma.bd_base = bd_ptr; + +	dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n", +		bd_ptr, (unsigned long long)bd_phys); + +	/* Allocate space for descriptor status FIFO */ +	sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ? +					bd_num : TSI721_DMA_MINSTSSZ; +	sts_size = roundup_pow_of_two(sts_size); +	sts_ptr = dma_zalloc_coherent(&priv->pdev->dev, +				     sts_size * sizeof(struct tsi721_dma_sts), +				     &sts_phys, GFP_KERNEL); +	if (!sts_ptr) { +		/* Free space allocated for DMA descriptors */ +		dma_free_coherent(&priv->pdev->dev, +				  bd_num * sizeof(struct tsi721_dma_desc), +				  bd_ptr, bd_phys); +		priv->mdma.bd_base = NULL; +		return -ENOMEM; +	} + +	priv->mdma.sts_phys = sts_phys; +	priv->mdma.sts_base = sts_ptr; +	priv->mdma.sts_size = sts_size; + +	dev_dbg(&priv->pdev->dev, +		"desc status FIFO @ %p (phys = %llx) size=0x%x\n", +		sts_ptr, (unsigned long long)sts_phys, sts_size); + +	/* Initialize DMA descriptors ring */ +	bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29); +	bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys & +						 TSI721_DMAC_DPTRL_MASK); +	bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32); + +	/* Setup DMA descriptor pointers */ +	iowrite32(((u64)bd_phys >> 32),	regs + TSI721_DMAC_DPTRH); +	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK), +		regs + TSI721_DMAC_DPTRL); + +	/* Setup descriptor status FIFO */ +	iowrite32(((u64)sts_phys >> 32), regs + TSI721_DMAC_DSBH); +	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK), +		regs + TSI721_DMAC_DSBL); +	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size), +		regs + TSI721_DMAC_DSSZ); + +	/* Clear interrupt bits */ +	iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT); + +	ioread32(regs + TSI721_DMAC_INT); + +	/* Toggle DMA channel initialization */ +	iowrite32(TSI721_DMAC_CTL_INIT,	regs + TSI721_DMAC_CTL); +	ioread32(regs + TSI721_DMAC_CTL); +	udelay(10); + +	return 0; +} + +static int tsi721_bdma_maint_free(struct tsi721_device *priv) +{ +	u32 ch_stat; +	struct tsi721_bdma_maint *mdma = &priv->mdma; +	void __iomem *regs = priv->regs + TSI721_DMAC_BASE(mdma->ch_id); + +	if (mdma->bd_base == NULL) +		return 0; + +	/* Check if DMA channel still running */ +	ch_stat = ioread32(regs + TSI721_DMAC_STS); +	if (ch_stat & TSI721_DMAC_STS_RUN) +		return -EFAULT; + +	/* Put DMA channel into init state */ +	iowrite32(TSI721_DMAC_CTL_INIT,	regs + TSI721_DMAC_CTL); + +	/* Free space allocated for DMA descriptors */ +	dma_free_coherent(&priv->pdev->dev, +		mdma->bd_num * sizeof(struct tsi721_dma_desc), +		mdma->bd_base, mdma->bd_phys); +	
mdma->bd_base = NULL; + +	/* Free space allocated for status FIFO */ +	dma_free_coherent(&priv->pdev->dev, +		mdma->sts_size * sizeof(struct tsi721_dma_sts), +		mdma->sts_base, mdma->sts_phys); +	mdma->sts_base = NULL; +	return 0; +} + +/* Enable Inbound Messaging Interrupts */ +static void +tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch, +				  u32 inte_mask) +{ +	u32 rval; + +	if (!inte_mask) +		return; + +	/* Clear pending Inbound Messaging interrupts */ +	iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch)); + +	/* Enable Inbound Messaging interrupts */ +	rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch)); +	iowrite32(rval | inte_mask, priv->regs + TSI721_IBDMAC_INTE(ch)); + +	if (priv->flags & TSI721_USING_MSIX) +		return; /* Finished if we are in MSI-X mode */ + +	/* +	 * For MSI and INTA interrupt signalling we need to enable next levels +	 */ + +	/* Enable Device Channel Interrupt */ +	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); +	iowrite32(rval | TSI721_INT_IMSG_CHAN(ch), +		  priv->regs + TSI721_DEV_CHAN_INTE); +} + +/* Disable Inbound Messaging Interrupts */ +static void +tsi721_imsg_interrupt_disable(struct tsi721_device *priv, int ch, +				   u32 inte_mask) +{ +	u32 rval; + +	if (!inte_mask) +		return; + +	/* Clear pending Inbound Messaging interrupts */ +	iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch)); + +	/* Disable Inbound Messaging interrupts */ +	rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch)); +	rval &= ~inte_mask; +	iowrite32(rval, priv->regs + TSI721_IBDMAC_INTE(ch)); + +	if (priv->flags & TSI721_USING_MSIX) +		return; /* Finished if we are in MSI-X mode */ + +	/* +	 * For MSI and INTA interrupt signalling we need to disable next levels +	 */ + +	/* Disable Device Channel Interrupt */ +	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); +	rval &= ~TSI721_INT_IMSG_CHAN(ch); +	iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE); +} + +/* Enable Outbound Messaging interrupts */ +static void +tsi721_omsg_interrupt_enable(struct tsi721_device *priv, int ch, +				  u32 inte_mask) +{ +	u32 rval; + +	if (!inte_mask) +		return; + +	/* Clear pending Outbound Messaging interrupts */ +	iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch)); + +	/* Enable Outbound Messaging channel interrupts */ +	rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch)); +	iowrite32(rval | inte_mask, priv->regs + TSI721_OBDMAC_INTE(ch)); + +	if (priv->flags & TSI721_USING_MSIX) +		return; /* Finished if we are in MSI-X mode */ + +	/* +	 * For MSI and INTA interrupt signalling we need to enable next levels +	 */ + +	/* Enable Device Channel Interrupt */ +	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); +	iowrite32(rval | TSI721_INT_OMSG_CHAN(ch), +		  priv->regs + TSI721_DEV_CHAN_INTE); +} + +/* Disable Outbound Messaging interrupts */ +static void +tsi721_omsg_interrupt_disable(struct tsi721_device *priv, int ch, +				   u32 inte_mask) +{ +	u32 rval; + +	if (!inte_mask) +		return; + +	/* Clear pending Outbound Messaging interrupts */ +	iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch)); + +	/* Disable Outbound Messaging interrupts */ +	rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch)); +	rval &= ~inte_mask; +	iowrite32(rval, priv->regs + TSI721_OBDMAC_INTE(ch)); + +	if (priv->flags & TSI721_USING_MSIX) +		return; /* Finished if we are in MSI-X mode */ + +	/* +	 * For MSI and INTA interrupt signalling we need to disable next levels +	 */ + +	/* Disable Device Channel Interrupt */ +	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); +	rval &= 
~TSI721_INT_OMSG_CHAN(ch); +	iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE); +} + +/** + * tsi721_add_outb_message - Add message to the Tsi721 outbound message queue + * @mport: Master port with outbound message queue + * @rdev: Target of outbound message + * @mbox: Outbound mailbox + * @buffer: Message to add to outbound queue + * @len: Length of message + */ +static int +tsi721_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox, +			void *buffer, size_t len) +{ +	struct tsi721_device *priv = mport->priv; +	struct tsi721_omsg_desc *desc; +	u32 tx_slot; + +	if (!priv->omsg_init[mbox] || +	    len > TSI721_MSG_MAX_SIZE || len < 8) +		return -EINVAL; + +	tx_slot = priv->omsg_ring[mbox].tx_slot; + +	/* Copy message into transfer buffer */ +	memcpy(priv->omsg_ring[mbox].omq_base[tx_slot], buffer, len); + +	if (len & 0x7) +		len += 8; + +	/* Build descriptor associated with buffer */ +	desc = priv->omsg_ring[mbox].omd_base; +	desc[tx_slot].type_id = cpu_to_le32((DTYPE4 << 29) | rdev->destid); +	if (tx_slot % 4 == 0) +		desc[tx_slot].type_id |= cpu_to_le32(TSI721_OMD_IOF); + +	desc[tx_slot].msg_info = +		cpu_to_le32((mport->sys_size << 26) | (mbox << 22) | +			    (0xe << 12) | (len & 0xff8)); +	desc[tx_slot].bufptr_lo = +		cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] & +			    0xffffffff); +	desc[tx_slot].bufptr_hi = +		cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] >> 32); + +	priv->omsg_ring[mbox].wr_count++; + +	/* Go to next descriptor */ +	if (++priv->omsg_ring[mbox].tx_slot == priv->omsg_ring[mbox].size) { +		priv->omsg_ring[mbox].tx_slot = 0; +		/* Move through the ring link descriptor at the end */ +		priv->omsg_ring[mbox].wr_count++; +	} + +	mb(); + +	/* Set new write count value */ +	iowrite32(priv->omsg_ring[mbox].wr_count, +		priv->regs + TSI721_OBDMAC_DWRCNT(mbox)); +	ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox)); + +	return 0; +} + +/** + * tsi721_omsg_handler - Outbound Message Interrupt Handler + * @priv: pointer to tsi721 private data + * @ch:   number of OB MSG channel to service + * + * Services channel interrupts from outbound messaging engine. 
+ */ +static void tsi721_omsg_handler(struct tsi721_device *priv, int ch) +{ +	u32 omsg_int; + +	spin_lock(&priv->omsg_ring[ch].lock); + +	omsg_int = ioread32(priv->regs + TSI721_OBDMAC_INT(ch)); + +	if (omsg_int & TSI721_OBDMAC_INT_ST_FULL) +		dev_info(&priv->pdev->dev, +			"OB MBOX%d: Status FIFO is full\n", ch); + +	if (omsg_int & (TSI721_OBDMAC_INT_DONE | TSI721_OBDMAC_INT_IOF_DONE)) { +		u32 srd_ptr; +		u64 *sts_ptr, last_ptr = 0, prev_ptr = 0; +		int i, j; +		u32 tx_slot; + +		/* +		 * Find last successfully processed descriptor +		 */ + +		/* Check and clear descriptor status FIFO entries */ +		srd_ptr = priv->omsg_ring[ch].sts_rdptr; +		sts_ptr = priv->omsg_ring[ch].sts_base; +		j = srd_ptr * 8; +		while (sts_ptr[j]) { +			for (i = 0; i < 8 && sts_ptr[j]; i++, j++) { +				prev_ptr = last_ptr; +				last_ptr = le64_to_cpu(sts_ptr[j]); +				sts_ptr[j] = 0; +			} + +			++srd_ptr; +			srd_ptr %= priv->omsg_ring[ch].sts_size; +			j = srd_ptr * 8; +		} + +		if (last_ptr == 0) +			goto no_sts_update; + +		priv->omsg_ring[ch].sts_rdptr = srd_ptr; +		iowrite32(srd_ptr, priv->regs + TSI721_OBDMAC_DSRP(ch)); + +		if (!priv->mport->outb_msg[ch].mcback) +			goto no_sts_update; + +		/* Inform upper layer about transfer completion */ + +		tx_slot = (last_ptr - (u64)priv->omsg_ring[ch].omd_phys)/ +						sizeof(struct tsi721_omsg_desc); + +		/* +		 * Check if this is a Link Descriptor (LD). +		 * If yes, ignore LD and use descriptor processed +		 * before LD. +		 */ +		if (tx_slot == priv->omsg_ring[ch].size) { +			if (prev_ptr) +				tx_slot = (prev_ptr - +					(u64)priv->omsg_ring[ch].omd_phys)/ +						sizeof(struct tsi721_omsg_desc); +			else +				goto no_sts_update; +		} + +		/* Move slot index to the next message to be sent */ +		++tx_slot; +		if (tx_slot == priv->omsg_ring[ch].size) +			tx_slot = 0; +		BUG_ON(tx_slot >= priv->omsg_ring[ch].size); +		priv->mport->outb_msg[ch].mcback(priv->mport, +				priv->omsg_ring[ch].dev_id, ch, +				tx_slot); +	} + +no_sts_update: + +	if (omsg_int & TSI721_OBDMAC_INT_ERROR) { +		/* +		* Outbound message operation aborted due to error, +		* reinitialize OB MSG channel +		*/ + +		dev_dbg(&priv->pdev->dev, "OB MSG ABORT ch_stat=%x\n", +			ioread32(priv->regs + TSI721_OBDMAC_STS(ch))); + +		iowrite32(TSI721_OBDMAC_INT_ERROR, +				priv->regs + TSI721_OBDMAC_INT(ch)); +		iowrite32(TSI721_OBDMAC_CTL_INIT, +				priv->regs + TSI721_OBDMAC_CTL(ch)); +		ioread32(priv->regs + TSI721_OBDMAC_CTL(ch)); + +		/* Inform upper level to clear all pending tx slots */ +		if (priv->mport->outb_msg[ch].mcback) +			priv->mport->outb_msg[ch].mcback(priv->mport, +					priv->omsg_ring[ch].dev_id, ch, +					priv->omsg_ring[ch].tx_slot); +		/* Synch tx_slot tracking */ +		iowrite32(priv->omsg_ring[ch].tx_slot, +			priv->regs + TSI721_OBDMAC_DRDCNT(ch)); +		ioread32(priv->regs + TSI721_OBDMAC_DRDCNT(ch)); +		priv->omsg_ring[ch].wr_count = priv->omsg_ring[ch].tx_slot; +		priv->omsg_ring[ch].sts_rdptr = 0; +	} + +	/* Clear channel interrupts */ +	iowrite32(omsg_int, priv->regs + TSI721_OBDMAC_INT(ch)); + +	if (!(priv->flags & TSI721_USING_MSIX)) { +		u32 ch_inte; + +		/* Re-enable channel interrupts */ +		ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); +		ch_inte |= TSI721_INT_OMSG_CHAN(ch); +		iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE); +	} + +	spin_unlock(&priv->omsg_ring[ch].lock); +} + +/** + * tsi721_open_outb_mbox - Initialize Tsi721 outbound mailbox + * @mport: Master port implementing Outbound Messaging Engine + * @dev_id: Device specific pointer to pass on event + 
* @mbox: Mailbox to open + * @entries: Number of entries in the outbound mailbox ring + */ +static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id, +				 int mbox, int entries) +{ +	struct tsi721_device *priv = mport->priv; +	struct tsi721_omsg_desc *bd_ptr; +	int i, rc = 0; + +	if ((entries < TSI721_OMSGD_MIN_RING_SIZE) || +	    (entries > (TSI721_OMSGD_RING_SIZE)) || +	    (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) { +		rc = -EINVAL; +		goto out; +	} + +	priv->omsg_ring[mbox].dev_id = dev_id; +	priv->omsg_ring[mbox].size = entries; +	priv->omsg_ring[mbox].sts_rdptr = 0; +	spin_lock_init(&priv->omsg_ring[mbox].lock); + +	/* Outbound Msg Buffer allocation based on +	   the number of maximum descriptor entries */ +	for (i = 0; i < entries; i++) { +		priv->omsg_ring[mbox].omq_base[i] = +			dma_alloc_coherent( +				&priv->pdev->dev, TSI721_MSG_BUFFER_SIZE, +				&priv->omsg_ring[mbox].omq_phys[i], +				GFP_KERNEL); +		if (priv->omsg_ring[mbox].omq_base[i] == NULL) { +			dev_dbg(&priv->pdev->dev, +				"Unable to allocate OB MSG data buffer for" +				" MBOX%d\n", mbox); +			rc = -ENOMEM; +			goto out_buf; +		} +	} + +	/* Outbound message descriptor allocation */ +	priv->omsg_ring[mbox].omd_base = dma_alloc_coherent( +				&priv->pdev->dev, +				(entries + 1) * sizeof(struct tsi721_omsg_desc), +				&priv->omsg_ring[mbox].omd_phys, GFP_KERNEL); +	if (priv->omsg_ring[mbox].omd_base == NULL) { +		dev_dbg(&priv->pdev->dev, +			"Unable to allocate OB MSG descriptor memory " +			"for MBOX%d\n", mbox); +		rc = -ENOMEM; +		goto out_buf; +	} + +	priv->omsg_ring[mbox].tx_slot = 0; + +	/* Outbound message descriptor status FIFO allocation */ +	priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1); +	priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev, +			priv->omsg_ring[mbox].sts_size * +						sizeof(struct tsi721_dma_sts), +			&priv->omsg_ring[mbox].sts_phys, GFP_KERNEL); +	if (priv->omsg_ring[mbox].sts_base == NULL) { +		dev_dbg(&priv->pdev->dev, +			"Unable to allocate OB MSG descriptor status FIFO " +			"for MBOX%d\n", mbox); +		rc = -ENOMEM; +		goto out_desc; +	} + +	/* +	 * Configure Outbound Messaging Engine +	 */ + +	/* Setup Outbound Message descriptor pointer */ +	iowrite32(((u64)priv->omsg_ring[mbox].omd_phys >> 32), +			priv->regs + TSI721_OBDMAC_DPTRH(mbox)); +	iowrite32(((u64)priv->omsg_ring[mbox].omd_phys & +					TSI721_OBDMAC_DPTRL_MASK), +			priv->regs + TSI721_OBDMAC_DPTRL(mbox)); + +	/* Setup Outbound Message descriptor status FIFO */ +	iowrite32(((u64)priv->omsg_ring[mbox].sts_phys >> 32), +			priv->regs + TSI721_OBDMAC_DSBH(mbox)); +	iowrite32(((u64)priv->omsg_ring[mbox].sts_phys & +					TSI721_OBDMAC_DSBL_MASK), +			priv->regs + TSI721_OBDMAC_DSBL(mbox)); +	iowrite32(TSI721_DMAC_DSSZ_SIZE(priv->omsg_ring[mbox].sts_size), +		priv->regs + (u32)TSI721_OBDMAC_DSSZ(mbox)); + +	/* Enable interrupts */ + +#ifdef CONFIG_PCI_MSI +	if (priv->flags & TSI721_USING_MSIX) { +		/* Request interrupt service if we are in MSI-X mode */ +		rc = request_irq( +			priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector, +			tsi721_omsg_msix, 0, +			priv->msix[TSI721_VECT_OMB0_DONE + mbox].irq_name, +			(void *)mport); + +		if (rc) { +			dev_dbg(&priv->pdev->dev, +				"Unable to allocate MSI-X interrupt for " +				"OBOX%d-DONE\n", mbox); +			goto out_stat; +		} + +		rc = request_irq(priv->msix[TSI721_VECT_OMB0_INT + mbox].vector, +			tsi721_omsg_msix, 0, +			priv->msix[TSI721_VECT_OMB0_INT + mbox].irq_name, +			(void *)mport); + +		if (rc)	{ +			
dev_dbg(&priv->pdev->dev, +				"Unable to allocate MSI-X interrupt for " +				"MBOX%d-INT\n", mbox); +			free_irq( +				priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector, +				(void *)mport); +			goto out_stat; +		} +	} +#endif /* CONFIG_PCI_MSI */ + +	tsi721_omsg_interrupt_enable(priv, mbox, TSI721_OBDMAC_INT_ALL); + +	/* Initialize Outbound Message descriptors ring */ +	bd_ptr = priv->omsg_ring[mbox].omd_base; +	bd_ptr[entries].type_id = cpu_to_le32(DTYPE5 << 29); +	bd_ptr[entries].msg_info = 0; +	bd_ptr[entries].next_lo = +		cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys & +		TSI721_OBDMAC_DPTRL_MASK); +	bd_ptr[entries].next_hi = +		cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys >> 32); +	priv->omsg_ring[mbox].wr_count = 0; +	mb(); + +	/* Initialize Outbound Message engine */ +	iowrite32(TSI721_OBDMAC_CTL_INIT, priv->regs + TSI721_OBDMAC_CTL(mbox)); +	ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox)); +	udelay(10); + +	priv->omsg_init[mbox] = 1; + +	return 0; + +#ifdef CONFIG_PCI_MSI +out_stat: +	dma_free_coherent(&priv->pdev->dev, +		priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts), +		priv->omsg_ring[mbox].sts_base, +		priv->omsg_ring[mbox].sts_phys); + +	priv->omsg_ring[mbox].sts_base = NULL; +#endif /* CONFIG_PCI_MSI */ + +out_desc: +	dma_free_coherent(&priv->pdev->dev, +		(entries + 1) * sizeof(struct tsi721_omsg_desc), +		priv->omsg_ring[mbox].omd_base, +		priv->omsg_ring[mbox].omd_phys); + +	priv->omsg_ring[mbox].omd_base = NULL; + +out_buf: +	for (i = 0; i < priv->omsg_ring[mbox].size; i++) { +		if (priv->omsg_ring[mbox].omq_base[i]) { +			dma_free_coherent(&priv->pdev->dev, +				TSI721_MSG_BUFFER_SIZE, +				priv->omsg_ring[mbox].omq_base[i], +				priv->omsg_ring[mbox].omq_phys[i]); + +			priv->omsg_ring[mbox].omq_base[i] = NULL; +		} +	} + +out: +	return rc; +} + +/** + * tsi721_close_outb_mbox - Close Tsi721 outbound mailbox + * @mport: Master port implementing the outbound message unit + * @mbox: Mailbox to close + */ +static void tsi721_close_outb_mbox(struct rio_mport *mport, int mbox) +{ +	struct tsi721_device *priv = mport->priv; +	u32 i; + +	if (!priv->omsg_init[mbox]) +		return; +	priv->omsg_init[mbox] = 0; + +	/* Disable Interrupts */ + +	tsi721_omsg_interrupt_disable(priv, mbox, TSI721_OBDMAC_INT_ALL); + +#ifdef CONFIG_PCI_MSI +	if (priv->flags & TSI721_USING_MSIX) { +		free_irq(priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector, +			 (void *)mport); +		free_irq(priv->msix[TSI721_VECT_OMB0_INT + mbox].vector, +			 (void *)mport); +	} +#endif /* CONFIG_PCI_MSI */ + +	/* Free OMSG Descriptor Status FIFO */ +	dma_free_coherent(&priv->pdev->dev, +		priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts), +		priv->omsg_ring[mbox].sts_base, +		priv->omsg_ring[mbox].sts_phys); + +	priv->omsg_ring[mbox].sts_base = NULL; + +	/* Free OMSG descriptors */ +	dma_free_coherent(&priv->pdev->dev, +		(priv->omsg_ring[mbox].size + 1) * +			sizeof(struct tsi721_omsg_desc), +		priv->omsg_ring[mbox].omd_base, +		priv->omsg_ring[mbox].omd_phys); + +	priv->omsg_ring[mbox].omd_base = NULL; + +	/* Free message buffers */ +	for (i = 0; i < priv->omsg_ring[mbox].size; i++) { +		if (priv->omsg_ring[mbox].omq_base[i]) { +			dma_free_coherent(&priv->pdev->dev, +				TSI721_MSG_BUFFER_SIZE, +				priv->omsg_ring[mbox].omq_base[i], +				priv->omsg_ring[mbox].omq_phys[i]); + +			priv->omsg_ring[mbox].omq_base[i] = NULL; +		} +	} +} + +/** + * tsi721_imsg_handler - Inbound Message Interrupt Handler + * @priv: pointer to tsi721 private data + * @ch: inbound message channel number to 
service + * + * Services channel interrupts from inbound messaging engine. + */ +static void tsi721_imsg_handler(struct tsi721_device *priv, int ch) +{ +	u32 mbox = ch - 4; +	u32 imsg_int; + +	spin_lock(&priv->imsg_ring[mbox].lock); + +	imsg_int = ioread32(priv->regs + TSI721_IBDMAC_INT(ch)); + +	if (imsg_int & TSI721_IBDMAC_INT_SRTO) +		dev_info(&priv->pdev->dev, "IB MBOX%d SRIO timeout\n", +			mbox); + +	if (imsg_int & TSI721_IBDMAC_INT_PC_ERROR) +		dev_info(&priv->pdev->dev, "IB MBOX%d PCIe error\n", +			mbox); + +	if (imsg_int & TSI721_IBDMAC_INT_FQ_LOW) +		dev_info(&priv->pdev->dev, +			"IB MBOX%d IB free queue low\n", mbox); + +	/* Clear IB channel interrupts */ +	iowrite32(imsg_int, priv->regs + TSI721_IBDMAC_INT(ch)); + +	/* If an IB Msg is received notify the upper layer */ +	if (imsg_int & TSI721_IBDMAC_INT_DQ_RCV && +		priv->mport->inb_msg[mbox].mcback) +		priv->mport->inb_msg[mbox].mcback(priv->mport, +				priv->imsg_ring[mbox].dev_id, mbox, -1); + +	if (!(priv->flags & TSI721_USING_MSIX)) { +		u32 ch_inte; + +		/* Re-enable channel interrupts */ +		ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); +		ch_inte |= TSI721_INT_IMSG_CHAN(ch); +		iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE); +	} + +	spin_unlock(&priv->imsg_ring[mbox].lock); +} + +/** + * tsi721_open_inb_mbox - Initialize Tsi721 inbound mailbox + * @mport: Master port implementing the Inbound Messaging Engine + * @dev_id: Device specific pointer to pass on event + * @mbox: Mailbox to open + * @entries: Number of entries in the inbound mailbox ring + */ +static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id, +				int mbox, int entries) +{ +	struct tsi721_device *priv = mport->priv; +	int ch = mbox + 4; +	int i; +	u64 *free_ptr; +	int rc = 0; + +	if ((entries < TSI721_IMSGD_MIN_RING_SIZE) || +	    (entries > TSI721_IMSGD_RING_SIZE) || +	    (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) { +		rc = -EINVAL; +		goto out; +	} + +	/* Initialize IB Messaging Ring */ +	priv->imsg_ring[mbox].dev_id = dev_id; +	priv->imsg_ring[mbox].size = entries; +	priv->imsg_ring[mbox].rx_slot = 0; +	priv->imsg_ring[mbox].desc_rdptr = 0; +	priv->imsg_ring[mbox].fq_wrptr = 0; +	for (i = 0; i < priv->imsg_ring[mbox].size; i++) +		priv->imsg_ring[mbox].imq_base[i] = NULL; +	spin_lock_init(&priv->imsg_ring[mbox].lock); + +	/* Allocate buffers for incoming messages */ +	priv->imsg_ring[mbox].buf_base = +		dma_alloc_coherent(&priv->pdev->dev, +				   entries * TSI721_MSG_BUFFER_SIZE, +				   &priv->imsg_ring[mbox].buf_phys, +				   GFP_KERNEL); + +	if (priv->imsg_ring[mbox].buf_base == NULL) { +		dev_err(&priv->pdev->dev, +			"Failed to allocate buffers for IB MBOX%d\n", mbox); +		rc = -ENOMEM; +		goto out; +	} + +	/* Allocate memory for circular free list */ +	priv->imsg_ring[mbox].imfq_base = +		dma_alloc_coherent(&priv->pdev->dev, +				   entries * 8, +				   &priv->imsg_ring[mbox].imfq_phys, +				   GFP_KERNEL); + +	if (priv->imsg_ring[mbox].imfq_base == NULL) { +		dev_err(&priv->pdev->dev, +			"Failed to allocate free queue for IB MBOX%d\n", mbox); +		rc = -ENOMEM; +		goto out_buf; +	} + +	/* Allocate memory for Inbound message descriptors */ +	priv->imsg_ring[mbox].imd_base = +		dma_alloc_coherent(&priv->pdev->dev, +				   entries * sizeof(struct tsi721_imsg_desc), +				   &priv->imsg_ring[mbox].imd_phys, GFP_KERNEL); + +	if (priv->imsg_ring[mbox].imd_base == NULL) { +		dev_err(&priv->pdev->dev, +			"Failed to allocate descriptor memory for IB MBOX%d\n", +			mbox); +		rc = -ENOMEM; +		goto out_dma; +	} + +	/* 
Fill free buffer pointer list */ +	free_ptr = priv->imsg_ring[mbox].imfq_base; +	for (i = 0; i < entries; i++) +		free_ptr[i] = cpu_to_le64( +				(u64)(priv->imsg_ring[mbox].buf_phys) + +				i * 0x1000); + +	mb(); + +	/* +	 * For mapping of inbound SRIO Messages into appropriate queues we need +	 * to set Inbound Device ID register in the messaging engine. We do it +	 * once when first inbound mailbox is requested. +	 */ +	if (!(priv->flags & TSI721_IMSGID_SET)) { +		iowrite32((u32)priv->mport->host_deviceid, +			priv->regs + TSI721_IB_DEVID); +		priv->flags |= TSI721_IMSGID_SET; +	} + +	/* +	 * Configure Inbound Messaging channel (ch = mbox + 4) +	 */ + +	/* Setup Inbound Message free queue */ +	iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys >> 32), +		priv->regs + TSI721_IBDMAC_FQBH(ch)); +	iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys & +			TSI721_IBDMAC_FQBL_MASK), +		priv->regs+TSI721_IBDMAC_FQBL(ch)); +	iowrite32(TSI721_DMAC_DSSZ_SIZE(entries), +		priv->regs + TSI721_IBDMAC_FQSZ(ch)); + +	/* Setup Inbound Message descriptor queue */ +	iowrite32(((u64)priv->imsg_ring[mbox].imd_phys >> 32), +		priv->regs + TSI721_IBDMAC_DQBH(ch)); +	iowrite32(((u32)priv->imsg_ring[mbox].imd_phys & +		   (u32)TSI721_IBDMAC_DQBL_MASK), +		priv->regs+TSI721_IBDMAC_DQBL(ch)); +	iowrite32(TSI721_DMAC_DSSZ_SIZE(entries), +		priv->regs + TSI721_IBDMAC_DQSZ(ch)); + +	/* Enable interrupts */ + +#ifdef CONFIG_PCI_MSI +	if (priv->flags & TSI721_USING_MSIX) { +		/* Request interrupt service if we are in MSI-X mode */ +		rc = request_irq(priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector, +			tsi721_imsg_msix, 0, +			priv->msix[TSI721_VECT_IMB0_RCV + mbox].irq_name, +			(void *)mport); + +		if (rc) { +			dev_dbg(&priv->pdev->dev, +				"Unable to allocate MSI-X interrupt for " +				"IBOX%d-DONE\n", mbox); +			goto out_desc; +		} + +		rc = request_irq(priv->msix[TSI721_VECT_IMB0_INT + mbox].vector, +			tsi721_imsg_msix, 0, +			priv->msix[TSI721_VECT_IMB0_INT + mbox].irq_name, +			(void *)mport); + +		if (rc)	{ +			dev_dbg(&priv->pdev->dev, +				"Unable to allocate MSI-X interrupt for " +				"IBOX%d-INT\n", mbox); +			free_irq( +				priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector, +				(void *)mport); +			goto out_desc; +		} +	} +#endif /* CONFIG_PCI_MSI */ + +	tsi721_imsg_interrupt_enable(priv, ch, TSI721_IBDMAC_INT_ALL); + +	/* Initialize Inbound Message Engine */ +	iowrite32(TSI721_IBDMAC_CTL_INIT, priv->regs + TSI721_IBDMAC_CTL(ch)); +	ioread32(priv->regs + TSI721_IBDMAC_CTL(ch)); +	udelay(10); +	priv->imsg_ring[mbox].fq_wrptr = entries - 1; +	iowrite32(entries - 1, priv->regs + TSI721_IBDMAC_FQWP(ch)); + +	priv->imsg_init[mbox] = 1; +	return 0; + +#ifdef CONFIG_PCI_MSI +out_desc: +	dma_free_coherent(&priv->pdev->dev, +		priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc), +		priv->imsg_ring[mbox].imd_base, +		priv->imsg_ring[mbox].imd_phys); + +	priv->imsg_ring[mbox].imd_base = NULL; +#endif /* CONFIG_PCI_MSI */ + +out_dma: +	dma_free_coherent(&priv->pdev->dev, +		priv->imsg_ring[mbox].size * 8, +		priv->imsg_ring[mbox].imfq_base, +		priv->imsg_ring[mbox].imfq_phys); + +	priv->imsg_ring[mbox].imfq_base = NULL; + +out_buf: +	dma_free_coherent(&priv->pdev->dev, +		priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE, +		priv->imsg_ring[mbox].buf_base, +		priv->imsg_ring[mbox].buf_phys); + +	priv->imsg_ring[mbox].buf_base = NULL; + +out: +	return rc; +} + +/** + * tsi721_close_inb_mbox - Shut down Tsi721 inbound mailbox + * @mport: Master port implementing the Inbound Messaging Engine + * @mbox: Mailbox to 
close
+ */
+static void tsi721_close_inb_mbox(struct rio_mport *mport, int mbox)
+{
+	struct tsi721_device *priv = mport->priv;
+	u32 rx_slot;
+	int ch = mbox + 4;
+
+	if (!priv->imsg_init[mbox]) /* mbox isn't initialized yet */
+		return;
+	priv->imsg_init[mbox] = 0;
+
+	/* Disable Inbound Messaging Engine */
+
+	/* Disable Interrupts */
+	tsi721_imsg_interrupt_disable(priv, ch, TSI721_OBDMAC_INT_MASK);
+
+#ifdef CONFIG_PCI_MSI
+	if (priv->flags & TSI721_USING_MSIX) {
+		free_irq(priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
+				(void *)mport);
+		free_irq(priv->msix[TSI721_VECT_IMB0_INT + mbox].vector,
+				(void *)mport);
+	}
+#endif /* CONFIG_PCI_MSI */
+
+	/* Clear Inbound Buffer Queue */
+	for (rx_slot = 0; rx_slot < priv->imsg_ring[mbox].size; rx_slot++)
+		priv->imsg_ring[mbox].imq_base[rx_slot] = NULL;
+
+	/* Free memory allocated for message buffers */
+	dma_free_coherent(&priv->pdev->dev,
+		priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE,
+		priv->imsg_ring[mbox].buf_base,
+		priv->imsg_ring[mbox].buf_phys);
+
+	priv->imsg_ring[mbox].buf_base = NULL;
+
+	/* Free memory allocated for free pointer list */
+	dma_free_coherent(&priv->pdev->dev,
+		priv->imsg_ring[mbox].size * 8,
+		priv->imsg_ring[mbox].imfq_base,
+		priv->imsg_ring[mbox].imfq_phys);
+
+	priv->imsg_ring[mbox].imfq_base = NULL;
+
+	/* Free memory allocated for RX descriptors */
+	dma_free_coherent(&priv->pdev->dev,
+		priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc),
+		priv->imsg_ring[mbox].imd_base,
+		priv->imsg_ring[mbox].imd_phys);
+
+	priv->imsg_ring[mbox].imd_base = NULL;
+}
+
+/**
+ * tsi721_add_inb_buffer - Add buffer to the Tsi721 inbound message queue
+ * @mport: Master port implementing the Inbound Messaging Engine
+ * @mbox: Inbound mailbox number
+ * @buf: Buffer to add to inbound queue
+ */
+static int tsi721_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
+{
+	struct tsi721_device *priv = mport->priv;
+	u32 rx_slot;
+	int rc = 0;
+
+	rx_slot = priv->imsg_ring[mbox].rx_slot;
+	if (priv->imsg_ring[mbox].imq_base[rx_slot]) {
+		dev_err(&priv->pdev->dev,
+			"Error adding inbound buffer %d, buffer exists\n",
+			rx_slot);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	priv->imsg_ring[mbox].imq_base[rx_slot] = buf;
+
+	if (++priv->imsg_ring[mbox].rx_slot == priv->imsg_ring[mbox].size)
+		priv->imsg_ring[mbox].rx_slot = 0;
+
+out:
+	return rc;
+}
+
+/**
+ * tsi721_get_inb_message - Fetch inbound message from the Tsi721 MSG Queue
+ * @mport: Master port implementing the Inbound Messaging Engine
+ * @mbox: Inbound mailbox number
+ *
+ * Returns pointer to the message on success or NULL on failure.
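+ *
+ * Editor's note (not part of the original patch): this op is not called
+ * directly; clients reach it through the generic RapidIO messaging API.
+ * A minimal, hypothetical receive path over this engine could look like:
+ *
+ *	rio_request_inb_mbox(mport, dev_id, mbox, 32, rx_callback);
+ *	for (i = 0; i < 32; i++)
+ *		rio_add_inb_buffer(mport, mbox, buf[i]);
+ *
+ * and later, from the registered rx_callback():
+ *
+ *	while ((msg = rio_get_inb_message(mport, mbox)) != NULL)
+ *		handle_msg(msg);
+ *
+ * where rx_callback(), buf[] and handle_msg() are driver-supplied names,
+ * and each buf[i] must be at least RIO_MAX_MSG_SIZE bytes.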
+ */ +static void *tsi721_get_inb_message(struct rio_mport *mport, int mbox) +{ +	struct tsi721_device *priv = mport->priv; +	struct tsi721_imsg_desc *desc; +	u32 rx_slot; +	void *rx_virt = NULL; +	u64 rx_phys; +	void *buf = NULL; +	u64 *free_ptr; +	int ch = mbox + 4; +	int msg_size; + +	if (!priv->imsg_init[mbox]) +		return NULL; + +	desc = priv->imsg_ring[mbox].imd_base; +	desc += priv->imsg_ring[mbox].desc_rdptr; + +	if (!(le32_to_cpu(desc->msg_info) & TSI721_IMD_HO)) +		goto out; + +	rx_slot = priv->imsg_ring[mbox].rx_slot; +	while (priv->imsg_ring[mbox].imq_base[rx_slot] == NULL) { +		if (++rx_slot == priv->imsg_ring[mbox].size) +			rx_slot = 0; +	} + +	rx_phys = ((u64)le32_to_cpu(desc->bufptr_hi) << 32) | +			le32_to_cpu(desc->bufptr_lo); + +	rx_virt = priv->imsg_ring[mbox].buf_base + +		  (rx_phys - (u64)priv->imsg_ring[mbox].buf_phys); + +	buf = priv->imsg_ring[mbox].imq_base[rx_slot]; +	msg_size = le32_to_cpu(desc->msg_info) & TSI721_IMD_BCOUNT; +	if (msg_size == 0) +		msg_size = RIO_MAX_MSG_SIZE; + +	memcpy(buf, rx_virt, msg_size); +	priv->imsg_ring[mbox].imq_base[rx_slot] = NULL; + +	desc->msg_info &= cpu_to_le32(~TSI721_IMD_HO); +	if (++priv->imsg_ring[mbox].desc_rdptr == priv->imsg_ring[mbox].size) +		priv->imsg_ring[mbox].desc_rdptr = 0; + +	iowrite32(priv->imsg_ring[mbox].desc_rdptr, +		priv->regs + TSI721_IBDMAC_DQRP(ch)); + +	/* Return free buffer into the pointer list */ +	free_ptr = priv->imsg_ring[mbox].imfq_base; +	free_ptr[priv->imsg_ring[mbox].fq_wrptr] = cpu_to_le64(rx_phys); + +	if (++priv->imsg_ring[mbox].fq_wrptr == priv->imsg_ring[mbox].size) +		priv->imsg_ring[mbox].fq_wrptr = 0; + +	iowrite32(priv->imsg_ring[mbox].fq_wrptr, +		priv->regs + TSI721_IBDMAC_FQWP(ch)); +out: +	return buf; +} + +/** + * tsi721_messages_init - Initialization of Messaging Engine + * @priv: pointer to tsi721 private data + * + * Configures Tsi721 messaging engine. 
+ */ +static int tsi721_messages_init(struct tsi721_device *priv) +{ +	int	ch; + +	iowrite32(0, priv->regs + TSI721_SMSG_ECC_LOG); +	iowrite32(0, priv->regs + TSI721_RETRY_GEN_CNT); +	iowrite32(0, priv->regs + TSI721_RETRY_RX_CNT); + +	/* Set SRIO Message Request/Response Timeout */ +	iowrite32(TSI721_RQRPTO_VAL, priv->regs + TSI721_RQRPTO); + +	/* Initialize Inbound Messaging Engine Registers */ +	for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++) { +		/* Clear interrupt bits */ +		iowrite32(TSI721_IBDMAC_INT_MASK, +			priv->regs + TSI721_IBDMAC_INT(ch)); +		/* Clear Status */ +		iowrite32(0, priv->regs + TSI721_IBDMAC_STS(ch)); + +		iowrite32(TSI721_SMSG_ECC_COR_LOG_MASK, +				priv->regs + TSI721_SMSG_ECC_COR_LOG(ch)); +		iowrite32(TSI721_SMSG_ECC_NCOR_MASK, +				priv->regs + TSI721_SMSG_ECC_NCOR(ch)); +	} + +	return 0; +} + +/** + * tsi721_disable_ints - disables all device interrupts + * @priv: pointer to tsi721 private data + */ +static void tsi721_disable_ints(struct tsi721_device *priv) +{ +	int ch; + +	/* Disable all device level interrupts */ +	iowrite32(0, priv->regs + TSI721_DEV_INTE); + +	/* Disable all Device Channel interrupts */ +	iowrite32(0, priv->regs + TSI721_DEV_CHAN_INTE); + +	/* Disable all Inbound Msg Channel interrupts */ +	for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++) +		iowrite32(0, priv->regs + TSI721_IBDMAC_INTE(ch)); + +	/* Disable all Outbound Msg Channel interrupts */ +	for (ch = 0; ch < TSI721_OMSG_CHNUM; ch++) +		iowrite32(0, priv->regs + TSI721_OBDMAC_INTE(ch)); + +	/* Disable all general messaging interrupts */ +	iowrite32(0, priv->regs + TSI721_SMSG_INTE); + +	/* Disable all BDMA Channel interrupts */ +	for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) +		iowrite32(0, +			priv->regs + TSI721_DMAC_BASE(ch) + TSI721_DMAC_INTE); + +	/* Disable all general BDMA interrupts */ +	iowrite32(0, priv->regs + TSI721_BDMA_INTE); + +	/* Disable all SRIO Channel interrupts */ +	for (ch = 0; ch < TSI721_SRIO_MAXCH; ch++) +		iowrite32(0, priv->regs + TSI721_SR_CHINTE(ch)); + +	/* Disable all general SR2PC interrupts */ +	iowrite32(0, priv->regs + TSI721_SR2PC_GEN_INTE); + +	/* Disable all PC2SR interrupts */ +	iowrite32(0, priv->regs + TSI721_PC2SR_INTE); + +	/* Disable all I2C interrupts */ +	iowrite32(0, priv->regs + TSI721_I2C_INT_ENABLE); + +	/* Disable SRIO MAC interrupts */ +	iowrite32(0, priv->regs + TSI721_RIO_EM_INT_ENABLE); +	iowrite32(0, priv->regs + TSI721_RIO_EM_DEV_INT_EN); +} + +/** + * tsi721_setup_mport - Setup Tsi721 as RapidIO subsystem master port + * @priv: pointer to tsi721 private data + * + * Configures Tsi721 as RapidIO master port. 
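+ *
+ * Editor's note (not part of the original patch): the rio_ops table built
+ * below is the glue between the generic RapidIO core and this device.
+ * For example, a maintenance read issued through the core as
+ *
+ *	u32 did;
+ *	rio_mport_read_config_32(mport, destid, hopcount,
+ *				 RIO_DEV_ID_CAR, &did);
+ *
+ * is dispatched to ops->cread and therefore ends up in
+ * tsi721_cread_dma(), which performs the SRIO maintenance transaction.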
+ */ +static int tsi721_setup_mport(struct tsi721_device *priv) +{ +	struct pci_dev *pdev = priv->pdev; +	int err = 0; +	struct rio_ops *ops; + +	struct rio_mport *mport; + +	ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL); +	if (!ops) { +		dev_dbg(&pdev->dev, "Unable to allocate memory for rio_ops\n"); +		return -ENOMEM; +	} + +	ops->lcread = tsi721_lcread; +	ops->lcwrite = tsi721_lcwrite; +	ops->cread = tsi721_cread_dma; +	ops->cwrite = tsi721_cwrite_dma; +	ops->dsend = tsi721_dsend; +	ops->open_inb_mbox = tsi721_open_inb_mbox; +	ops->close_inb_mbox = tsi721_close_inb_mbox; +	ops->open_outb_mbox = tsi721_open_outb_mbox; +	ops->close_outb_mbox = tsi721_close_outb_mbox; +	ops->add_outb_message = tsi721_add_outb_message; +	ops->add_inb_buffer = tsi721_add_inb_buffer; +	ops->get_inb_message = tsi721_get_inb_message; +	ops->map_inb = tsi721_rio_map_inb_mem; +	ops->unmap_inb = tsi721_rio_unmap_inb_mem; + +	mport = kzalloc(sizeof(struct rio_mport), GFP_KERNEL); +	if (!mport) { +		kfree(ops); +		dev_dbg(&pdev->dev, "Unable to allocate memory for mport\n"); +		return -ENOMEM; +	} + +	mport->ops = ops; +	mport->index = 0; +	mport->sys_size = 0; /* small system */ +	mport->phy_type = RIO_PHY_SERIAL; +	mport->priv = (void *)priv; +	mport->phys_efptr = 0x100; +	mport->dev.parent = &pdev->dev; +	priv->mport = mport; + +	INIT_LIST_HEAD(&mport->dbells); + +	rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); +	rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3); +	rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3); +	snprintf(mport->name, RIO_MAX_MPORT_NAME, "%s(%s)", +		 dev_driver_string(&pdev->dev), dev_name(&pdev->dev)); + +	/* Hook up interrupt handler */ + +#ifdef CONFIG_PCI_MSI +	if (!tsi721_enable_msix(priv)) +		priv->flags |= TSI721_USING_MSIX; +	else if (!pci_enable_msi(pdev)) +		priv->flags |= TSI721_USING_MSI; +	else +		dev_info(&pdev->dev, +			 "MSI/MSI-X is not available. 
Using legacy INTx.\n"); +#endif /* CONFIG_PCI_MSI */ + +	err = tsi721_request_irq(mport); + +	if (!err) { +		tsi721_interrupts_init(priv); +		ops->pwenable = tsi721_pw_enable; +	} else { +		dev_err(&pdev->dev, "Unable to get assigned PCI IRQ " +			"vector %02X err=0x%x\n", pdev->irq, err); +		goto err_exit; +	} + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE +	tsi721_register_dma(priv); +#endif +	/* Enable SRIO link */ +	iowrite32(ioread32(priv->regs + TSI721_DEVCTL) | +		  TSI721_DEVCTL_SRBOOT_CMPL, +		  priv->regs + TSI721_DEVCTL); + +	rio_register_mport(mport); + +	if (mport->host_deviceid >= 0) +		iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER | +			  RIO_PORT_GEN_DISCOVERED, +			  priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR)); +	else +		iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR)); + +	return 0; + +err_exit: +	kfree(mport); +	kfree(ops); +	return err; +} + +static int tsi721_probe(struct pci_dev *pdev, +				  const struct pci_device_id *id) +{ +	struct tsi721_device *priv; +	int err; + +	priv = kzalloc(sizeof(struct tsi721_device), GFP_KERNEL); +	if (priv == NULL) { +		dev_err(&pdev->dev, "Failed to allocate memory for device\n"); +		err = -ENOMEM; +		goto err_exit; +	} + +	err = pci_enable_device(pdev); +	if (err) { +		dev_err(&pdev->dev, "Failed to enable PCI device\n"); +		goto err_clean; +	} + +	priv->pdev = pdev; + +#ifdef DEBUG +	{ +	int i; +	for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { +		dev_dbg(&pdev->dev, "res[%d] @ 0x%llx (0x%lx, 0x%lx)\n", +			i, (unsigned long long)pci_resource_start(pdev, i), +			(unsigned long)pci_resource_len(pdev, i), +			pci_resource_flags(pdev, i)); +	} +	} +#endif +	/* +	 * Verify BAR configuration +	 */ + +	/* BAR_0 (registers) must be 512KB+ in 32-bit address space */ +	if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM) || +	    pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM_64 || +	    pci_resource_len(pdev, BAR_0) < TSI721_REG_SPACE_SIZE) { +		dev_err(&pdev->dev, +			"Missing or misconfigured CSR BAR0, aborting.\n"); +		err = -ENODEV; +		goto err_disable_pdev; +	} + +	/* BAR_1 (outbound doorbells) must be 16MB+ in 32-bit address space */ +	if (!(pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM) || +	    pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM_64 || +	    pci_resource_len(pdev, BAR_1) < TSI721_DB_WIN_SIZE) { +		dev_err(&pdev->dev, +			"Missing or misconfigured Doorbell BAR1, aborting.\n"); +		err = -ENODEV; +		goto err_disable_pdev; +	} + +	/* +	 * BAR_2 and BAR_4 (outbound translation) must be in 64-bit PCIe address +	 * space. +	 * NOTE: BAR_2 and BAR_4 are not used by this version of driver. +	 * It may be a good idea to keep them disabled using HW configuration +	 * to save PCI memory space. 
+	 */ +	if ((pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM) && +	    (pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM_64)) { +		dev_info(&pdev->dev, "Outbound BAR2 is not used but enabled.\n"); +	} + +	if ((pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM) && +	    (pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM_64)) { +		dev_info(&pdev->dev, "Outbound BAR4 is not used but enabled.\n"); +	} + +	err = pci_request_regions(pdev, DRV_NAME); +	if (err) { +		dev_err(&pdev->dev, "Cannot obtain PCI resources, " +			"aborting.\n"); +		goto err_disable_pdev; +	} + +	pci_set_master(pdev); + +	priv->regs = pci_ioremap_bar(pdev, BAR_0); +	if (!priv->regs) { +		dev_err(&pdev->dev, +			"Unable to map device registers space, aborting\n"); +		err = -ENOMEM; +		goto err_free_res; +	} + +	priv->odb_base = pci_ioremap_bar(pdev, BAR_1); +	if (!priv->odb_base) { +		dev_err(&pdev->dev, +			"Unable to map outbound doorbells space, aborting\n"); +		err = -ENOMEM; +		goto err_unmap_bars; +	} + +	/* Configure DMA attributes. */ +	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { +		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); +		if (err) { +			dev_info(&pdev->dev, "Unable to set DMA mask\n"); +			goto err_unmap_bars; +		} + +		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) +			dev_info(&pdev->dev, "Unable to set consistent DMA mask\n"); +	} else { +		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); +		if (err) +			dev_info(&pdev->dev, "Unable to set consistent DMA mask\n"); +	} + +	BUG_ON(!pci_is_pcie(pdev)); + +	/* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */ +	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, +		PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN | +		PCI_EXP_DEVCTL_NOSNOOP_EN, +		0x2 << MAX_READ_REQUEST_SZ_SHIFT); + +	/* Adjust PCIe completion timeout. 
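+	 * Editor's note (not part of the original patch): the 0xf mask below
+	 * clears the 4-bit Completion Timeout Value field in Device Control 2
+	 * and the value 0x2 selects the 1 ms to 10 ms timeout range defined
+	 * by the PCIe specification. In kernels that provide the named mask,
+	 * an equivalent call would be:
+	 *
+	 *	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2,
+	 *			PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0x2);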
*/ +	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, 0xf, 0x2); + +	/* +	 * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block +	 */ +	pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0x01); +	pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXTBL, +						TSI721_MSIXTBL_OFFSET); +	pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXPBA, +						TSI721_MSIXPBA_OFFSET); +	pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0); +	/* End of FIXUP */ + +	tsi721_disable_ints(priv); + +	tsi721_init_pc2sr_mapping(priv); +	tsi721_init_sr2pc_mapping(priv); + +	if (tsi721_bdma_maint_init(priv)) { +		dev_err(&pdev->dev, "BDMA initialization failed, aborting\n"); +		err = -ENOMEM; +		goto err_unmap_bars; +	} + +	err = tsi721_doorbell_init(priv); +	if (err) +		goto err_free_bdma; + +	tsi721_port_write_init(priv); + +	err = tsi721_messages_init(priv); +	if (err) +		goto err_free_consistent; + +	err = tsi721_setup_mport(priv); +	if (err) +		goto err_free_consistent; + +	return 0; + +err_free_consistent: +	tsi721_doorbell_free(priv); +err_free_bdma: +	tsi721_bdma_maint_free(priv); +err_unmap_bars: +	if (priv->regs) +		iounmap(priv->regs); +	if (priv->odb_base) +		iounmap(priv->odb_base); +err_free_res: +	pci_release_regions(pdev); +	pci_clear_master(pdev); +err_disable_pdev: +	pci_disable_device(pdev); +err_clean: +	kfree(priv); +err_exit: +	return err; +} + +static DEFINE_PCI_DEVICE_TABLE(tsi721_pci_tbl) = { +	{ PCI_DEVICE(PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_TSI721) }, +	{ 0, }	/* terminate list */ +}; + +MODULE_DEVICE_TABLE(pci, tsi721_pci_tbl); + +static struct pci_driver tsi721_driver = { +	.name		= "tsi721", +	.id_table	= tsi721_pci_tbl, +	.probe		= tsi721_probe, +}; + +static int __init tsi721_init(void) +{ +	return pci_register_driver(&tsi721_driver); +} + +device_initcall(tsi721_init); + +MODULE_DESCRIPTION("IDT Tsi721 PCIExpress-to-SRIO bridge driver"); +MODULE_AUTHOR("Integrated Device Technology, Inc."); +MODULE_LICENSE("GPL"); diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h new file mode 100644 index 00000000000..0305675270e --- /dev/null +++ b/drivers/rapidio/devices/tsi721.h @@ -0,0 +1,857 @@ +/* + * Tsi721 PCIExpress-to-SRIO bridge definitions + * + * Copyright 2011, Integrated Device Technology, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA  02111-1307, USA. 
+ */ + +#ifndef __TSI721_H +#define __TSI721_H + +#define DRV_NAME	"tsi721" + +#define DEFAULT_HOPCOUNT	0xff +#define DEFAULT_DESTID		0xff + +/* PCI device ID */ +#define PCI_DEVICE_ID_TSI721		0x80ab + +#define BAR_0	0 +#define BAR_1	1 +#define BAR_2	2 +#define BAR_4	4 + +#define TSI721_PC2SR_BARS	2 +#define TSI721_PC2SR_WINS	8 +#define TSI721_PC2SR_ZONES	8 +#define TSI721_MAINT_WIN	0 /* Window for outbound maintenance requests */ +#define IDB_QUEUE		0 /* Inbound Doorbell Queue to use */ +#define IDB_QSIZE		512 /* Inbound Doorbell Queue size */ + +/* Memory space sizes */ +#define TSI721_REG_SPACE_SIZE		(512 * 1024) /* 512K */ +#define TSI721_DB_WIN_SIZE		(16 * 1024 * 1024) /* 16MB */ + +#define  RIO_TT_CODE_8		0x00000000 +#define  RIO_TT_CODE_16		0x00000001 + +#define TSI721_DMA_MAXCH	8 +#define TSI721_DMA_MINSTSSZ	32 +#define TSI721_DMA_STSBLKSZ	8 + +#define TSI721_SRIO_MAXCH	8 + +#define DBELL_SID(buf)		(((u8)buf[2] << 8) | (u8)buf[3]) +#define DBELL_TID(buf)		(((u8)buf[4] << 8) | (u8)buf[5]) +#define DBELL_INF(buf)		(((u8)buf[0] << 8) | (u8)buf[1]) + +#define TSI721_RIO_PW_MSG_SIZE	16  /* Tsi721 saves only 16 bytes of PW msg */ + +/* Register definitions */ + +/* + * Registers in PCIe configuration space + */ + +#define TSI721_PCIECFG_MSIXTBL	0x0a4 +#define TSI721_MSIXTBL_OFFSET	0x2c000 +#define TSI721_PCIECFG_MSIXPBA	0x0a8 +#define TSI721_MSIXPBA_OFFSET	0x2a000 +#define TSI721_PCIECFG_EPCTL	0x400 + +#define MAX_READ_REQUEST_SZ_SHIFT	12 + +/* + * Event Management Registers + */ + +#define TSI721_RIO_EM_INT_STAT		0x10910 +#define TSI721_RIO_EM_INT_STAT_PW_RX	0x00010000 + +#define TSI721_RIO_EM_INT_ENABLE	0x10914 +#define TSI721_RIO_EM_INT_ENABLE_PW_RX	0x00010000 + +#define TSI721_RIO_EM_DEV_INT_EN	0x10930 +#define TSI721_RIO_EM_DEV_INT_EN_INT	0x00000001 + +/* + * Port-Write Block Registers + */ + +#define TSI721_RIO_PW_CTL		0x10a04 +#define TSI721_RIO_PW_CTL_PW_TIMER	0xf0000000 +#define TSI721_RIO_PW_CTL_PWT_DIS	(0 << 28) +#define TSI721_RIO_PW_CTL_PWT_103	(1 << 28) +#define TSI721_RIO_PW_CTL_PWT_205	(1 << 29) +#define TSI721_RIO_PW_CTL_PWT_410	(1 << 30) +#define TSI721_RIO_PW_CTL_PWT_820	(1 << 31) +#define TSI721_RIO_PW_CTL_PWC_MODE	0x01000000 +#define TSI721_RIO_PW_CTL_PWC_CONT	0x00000000 +#define TSI721_RIO_PW_CTL_PWC_REL	0x01000000 + +#define TSI721_RIO_PW_RX_STAT		0x10a10 +#define TSI721_RIO_PW_RX_STAT_WR_SIZE	0x0000f000 +#define TSI_RIO_PW_RX_STAT_WDPTR	0x00000100 +#define TSI721_RIO_PW_RX_STAT_PW_SHORT	0x00000008 +#define TSI721_RIO_PW_RX_STAT_PW_TRUNC	0x00000004 +#define TSI721_RIO_PW_RX_STAT_PW_DISC	0x00000002 +#define TSI721_RIO_PW_RX_STAT_PW_VAL	0x00000001 + +#define TSI721_RIO_PW_RX_CAPT(x)	(0x10a20 + (x)*4) + +/* + * Inbound Doorbells + */ + +#define TSI721_IDB_ENTRY_SIZE	64 + +#define TSI721_IDQ_CTL(x)	(0x20000 + (x) * 0x1000) +#define TSI721_IDQ_SUSPEND	0x00000002 +#define TSI721_IDQ_INIT		0x00000001 + +#define TSI721_IDQ_STS(x)	(0x20004 + (x) * 0x1000) +#define TSI721_IDQ_RUN		0x00200000 + +#define TSI721_IDQ_MASK(x)	(0x20008 + (x) * 0x1000) +#define TSI721_IDQ_MASK_MASK	0xffff0000 +#define TSI721_IDQ_MASK_PATT	0x0000ffff + +#define TSI721_IDQ_RP(x)	(0x2000c + (x) * 0x1000) +#define TSI721_IDQ_RP_PTR	0x0007ffff + +#define TSI721_IDQ_WP(x)	(0x20010 + (x) * 0x1000) +#define TSI721_IDQ_WP_PTR	0x0007ffff + +#define TSI721_IDQ_BASEL(x)	(0x20014 + (x) * 0x1000) +#define TSI721_IDQ_BASEL_ADDR	0xffffffc0 +#define TSI721_IDQ_BASEU(x)	(0x20018 + (x) * 0x1000) +#define TSI721_IDQ_SIZE(x)	(0x2001c + (x) * 0x1000) +#define TSI721_IDQ_SIZE_VAL(size)	(__fls(size) - 4) +#define 
TSI721_IDQ_SIZE_MIN	512 +#define TSI721_IDQ_SIZE_MAX	(512 * 1024) + +#define TSI721_SR_CHINT(x)	(0x20040 + (x) * 0x1000) +#define TSI721_SR_CHINTE(x)	(0x20044 + (x) * 0x1000) +#define TSI721_SR_CHINTSET(x)	(0x20048 + (x) * 0x1000) +#define TSI721_SR_CHINT_ODBOK	0x00000020 +#define TSI721_SR_CHINT_IDBQRCV	0x00000010 +#define TSI721_SR_CHINT_SUSP	0x00000008 +#define TSI721_SR_CHINT_ODBTO	0x00000004 +#define TSI721_SR_CHINT_ODBRTRY	0x00000002 +#define TSI721_SR_CHINT_ODBERR	0x00000001 +#define TSI721_SR_CHINT_ALL	0x0000003f + +#define TSI721_IBWIN_NUM	8 + +#define TSI721_IBWIN_LB(x)	(0x29000 + (x) * 0x20) +#define TSI721_IBWIN_LB_BA	0xfffff000 +#define TSI721_IBWIN_LB_WEN	0x00000001 + +#define TSI721_IBWIN_UB(x)	(0x29004 + (x) * 0x20) +#define TSI721_IBWIN_SZ(x)	(0x29008 + (x) * 0x20) +#define TSI721_IBWIN_SZ_SIZE	0x00001f00 +#define TSI721_IBWIN_SIZE(size)	(__fls(size) - 12) + +#define TSI721_IBWIN_TLA(x)	(0x2900c + (x) * 0x20) +#define TSI721_IBWIN_TLA_ADD	0xfffff000 +#define TSI721_IBWIN_TUA(x)	(0x29010 + (x) * 0x20) + +#define TSI721_SR2PC_GEN_INTE	0x29800 +#define TSI721_SR2PC_PWE	0x29804 +#define TSI721_SR2PC_GEN_INT	0x29808 + +#define TSI721_DEV_INTE		0x29840 +#define TSI721_DEV_INT		0x29844 +#define TSI721_DEV_INTSET	0x29848 +#define TSI721_DEV_INT_BDMA_CH	0x00002000 +#define TSI721_DEV_INT_BDMA_NCH	0x00001000 +#define TSI721_DEV_INT_SMSG_CH	0x00000800 +#define TSI721_DEV_INT_SMSG_NCH	0x00000400 +#define TSI721_DEV_INT_SR2PC_CH	0x00000200 +#define TSI721_DEV_INT_SRIO	0x00000020 + +#define TSI721_DEV_CHAN_INTE	0x2984c +#define TSI721_DEV_CHAN_INT	0x29850 + +#define TSI721_INT_SR2PC_CHAN_M	0xff000000 +#define TSI721_INT_SR2PC_CHAN(x) (1 << (24 + (x))) +#define TSI721_INT_IMSG_CHAN_M	0x00ff0000 +#define TSI721_INT_IMSG_CHAN(x)	(1 << (16 + (x))) +#define TSI721_INT_OMSG_CHAN_M	0x0000ff00 +#define TSI721_INT_OMSG_CHAN(x)	(1 << (8 + (x))) +#define TSI721_INT_BDMA_CHAN_M	0x000000ff +#define TSI721_INT_BDMA_CHAN(x)	(1 << (x)) + +/* + * PC2SR block registers + */ +#define TSI721_OBWIN_NUM	TSI721_PC2SR_WINS + +#define TSI721_OBWINLB(x)	(0x40000 + (x) * 0x20) +#define TSI721_OBWINLB_BA	0xffff8000 +#define TSI721_OBWINLB_WEN	0x00000001 + +#define TSI721_OBWINUB(x)	(0x40004 + (x) * 0x20) + +#define TSI721_OBWINSZ(x)	(0x40008 + (x) * 0x20) +#define TSI721_OBWINSZ_SIZE	0x00001f00 +#define TSI721_OBWIN_SIZE(size)	(__fls(size) - 15) + +#define TSI721_ZONE_SEL		0x41300 +#define TSI721_ZONE_SEL_RD_WRB	0x00020000 +#define TSI721_ZONE_SEL_GO	0x00010000 +#define TSI721_ZONE_SEL_WIN	0x00000038 +#define TSI721_ZONE_SEL_ZONE	0x00000007 + +#define TSI721_LUT_DATA0	0x41304 +#define TSI721_LUT_DATA0_ADD	0xfffff000 +#define TSI721_LUT_DATA0_RDTYPE	0x00000f00 +#define TSI721_LUT_DATA0_NREAD	0x00000100 +#define TSI721_LUT_DATA0_MNTRD	0x00000200 +#define TSI721_LUT_DATA0_RDCRF	0x00000020 +#define TSI721_LUT_DATA0_WRCRF	0x00000010 +#define TSI721_LUT_DATA0_WRTYPE	0x0000000f +#define TSI721_LUT_DATA0_NWR	0x00000001 +#define TSI721_LUT_DATA0_MNTWR	0x00000002 +#define TSI721_LUT_DATA0_NWR_R	0x00000004 + +#define TSI721_LUT_DATA1	0x41308 + +#define TSI721_LUT_DATA2	0x4130c +#define TSI721_LUT_DATA2_HC	0xff000000 +#define TSI721_LUT_DATA2_ADD65	0x000c0000 +#define TSI721_LUT_DATA2_TT	0x00030000 +#define TSI721_LUT_DATA2_DSTID	0x0000ffff + +#define TSI721_PC2SR_INTE	0x41310 + +#define TSI721_DEVCTL		0x48004 +#define TSI721_DEVCTL_SRBOOT_CMPL	0x00000004 + +#define TSI721_I2C_INT_ENABLE	0x49120 + +/* + * Block DMA Engine Registers + *   x = 0..7 + */ + +#define TSI721_DMAC_BASE(x)	(0x51000 + (x) * 0x1000) + +#define 
TSI721_DMAC_DWRCNT	0x000 +#define TSI721_DMAC_DRDCNT	0x004 + +#define TSI721_DMAC_CTL		0x008 +#define TSI721_DMAC_CTL_SUSP	0x00000002 +#define TSI721_DMAC_CTL_INIT	0x00000001 + +#define TSI721_DMAC_INT		0x00c +#define TSI721_DMAC_INT_STFULL	0x00000010 +#define TSI721_DMAC_INT_DONE	0x00000008 +#define TSI721_DMAC_INT_SUSP	0x00000004 +#define TSI721_DMAC_INT_ERR	0x00000002 +#define TSI721_DMAC_INT_IOFDONE	0x00000001 +#define TSI721_DMAC_INT_ALL	0x0000001f + +#define TSI721_DMAC_INTSET	0x010 + +#define TSI721_DMAC_STS		0x014 +#define TSI721_DMAC_STS_ABORT	0x00400000 +#define TSI721_DMAC_STS_RUN	0x00200000 +#define TSI721_DMAC_STS_CS	0x001f0000 + +#define TSI721_DMAC_INTE	0x018 + +#define TSI721_DMAC_DPTRL	0x024 +#define TSI721_DMAC_DPTRL_MASK	0xffffffe0 + +#define TSI721_DMAC_DPTRH	0x028 + +#define TSI721_DMAC_DSBL	0x02c +#define TSI721_DMAC_DSBL_MASK	0xffffffc0 + +#define TSI721_DMAC_DSBH	0x030 + +#define TSI721_DMAC_DSSZ	0x034 +#define TSI721_DMAC_DSSZ_SIZE_M	0x0000000f +#define TSI721_DMAC_DSSZ_SIZE(size)	(__fls(size) - 4) + +#define TSI721_DMAC_DSRP	0x038 +#define TSI721_DMAC_DSRP_MASK	0x0007ffff + +#define TSI721_DMAC_DSWP	0x03c +#define TSI721_DMAC_DSWP_MASK	0x0007ffff + +#define TSI721_BDMA_INTE	0x5f000 + +/* + * Messaging definitions + */ +#define TSI721_MSG_BUFFER_SIZE		RIO_MAX_MSG_SIZE +#define TSI721_MSG_MAX_SIZE		RIO_MAX_MSG_SIZE +#define TSI721_IMSG_MAXCH		8 +#define TSI721_IMSG_CHNUM		TSI721_IMSG_MAXCH +#define TSI721_IMSGD_MIN_RING_SIZE	32 +#define TSI721_IMSGD_RING_SIZE		512 + +#define TSI721_OMSG_CHNUM		4 /* One channel per MBOX */ +#define TSI721_OMSGD_MIN_RING_SIZE	32 +#define TSI721_OMSGD_RING_SIZE		512 + +/* + * Outbound Messaging Engine Registers + *   x = 0..7 + */ + +#define TSI721_OBDMAC_DWRCNT(x)		(0x61000 + (x) * 0x1000) + +#define TSI721_OBDMAC_DRDCNT(x)		(0x61004 + (x) * 0x1000) + +#define TSI721_OBDMAC_CTL(x)		(0x61008 + (x) * 0x1000) +#define TSI721_OBDMAC_CTL_MASK		0x00000007 +#define TSI721_OBDMAC_CTL_RETRY_THR	0x00000004 +#define TSI721_OBDMAC_CTL_SUSPEND	0x00000002 +#define TSI721_OBDMAC_CTL_INIT		0x00000001 + +#define TSI721_OBDMAC_INT(x)		(0x6100c + (x) * 0x1000) +#define TSI721_OBDMAC_INTSET(x)		(0x61010 + (x) * 0x1000) +#define TSI721_OBDMAC_INTE(x)		(0x61018 + (x) * 0x1000) +#define TSI721_OBDMAC_INT_MASK		0x0000001F +#define TSI721_OBDMAC_INT_ST_FULL	0x00000010 +#define TSI721_OBDMAC_INT_DONE		0x00000008 +#define TSI721_OBDMAC_INT_SUSPENDED	0x00000004 +#define TSI721_OBDMAC_INT_ERROR		0x00000002 +#define TSI721_OBDMAC_INT_IOF_DONE	0x00000001 +#define TSI721_OBDMAC_INT_ALL		TSI721_OBDMAC_INT_MASK + +#define TSI721_OBDMAC_STS(x)		(0x61014 + (x) * 0x1000) +#define TSI721_OBDMAC_STS_MASK		0x007f0000 +#define TSI721_OBDMAC_STS_ABORT		0x00400000 +#define TSI721_OBDMAC_STS_RUN		0x00200000 +#define TSI721_OBDMAC_STS_CS		0x001f0000 + +#define TSI721_OBDMAC_PWE(x)		(0x6101c + (x) * 0x1000) +#define TSI721_OBDMAC_PWE_MASK		0x00000002 +#define TSI721_OBDMAC_PWE_ERROR_EN	0x00000002 + +#define TSI721_OBDMAC_DPTRL(x)		(0x61020 + (x) * 0x1000) +#define TSI721_OBDMAC_DPTRL_MASK	0xfffffff0 + +#define TSI721_OBDMAC_DPTRH(x)		(0x61024 + (x) * 0x1000) +#define TSI721_OBDMAC_DPTRH_MASK	0xffffffff + +#define TSI721_OBDMAC_DSBL(x)		(0x61040 + (x) * 0x1000) +#define TSI721_OBDMAC_DSBL_MASK		0xffffffc0 + +#define TSI721_OBDMAC_DSBH(x)		(0x61044 + (x) * 0x1000) +#define TSI721_OBDMAC_DSBH_MASK		0xffffffff + +#define TSI721_OBDMAC_DSSZ(x)		(0x61048 + (x) * 0x1000) +#define TSI721_OBDMAC_DSSZ_MASK		0x0000000f + +#define TSI721_OBDMAC_DSRP(x)		(0x6104c + (x) * 0x1000) +#define 
TSI721_OBDMAC_DSRP_MASK		0x0007ffff + +#define TSI721_OBDMAC_DSWP(x)		(0x61050 + (x) * 0x1000) +#define TSI721_OBDMAC_DSWP_MASK		0x0007ffff + +#define TSI721_RQRPTO			0x60010 +#define TSI721_RQRPTO_MASK		0x00ffffff +#define TSI721_RQRPTO_VAL		400	/* Response TO value */ + +/* + * Inbound Messaging Engine Registers + *   x = 0..7 + */ + +#define TSI721_IB_DEVID_GLOBAL		0xffff +#define TSI721_IBDMAC_FQBL(x)		(0x61200 + (x) * 0x1000) +#define TSI721_IBDMAC_FQBL_MASK		0xffffffc0 + +#define TSI721_IBDMAC_FQBH(x)		(0x61204 + (x) * 0x1000) +#define TSI721_IBDMAC_FQBH_MASK		0xffffffff + +#define TSI721_IBDMAC_FQSZ_ENTRY_INX	TSI721_IMSGD_RING_SIZE +#define TSI721_IBDMAC_FQSZ(x)		(0x61208 + (x) * 0x1000) +#define TSI721_IBDMAC_FQSZ_MASK		0x0000000f + +#define TSI721_IBDMAC_FQRP(x)		(0x6120c + (x) * 0x1000) +#define TSI721_IBDMAC_FQRP_MASK		0x0007ffff + +#define TSI721_IBDMAC_FQWP(x)		(0x61210 + (x) * 0x1000) +#define TSI721_IBDMAC_FQWP_MASK		0x0007ffff + +#define TSI721_IBDMAC_FQTH(x)		(0x61214 + (x) * 0x1000) +#define TSI721_IBDMAC_FQTH_MASK		0x0007ffff + +#define TSI721_IB_DEVID			0x60020 +#define TSI721_IB_DEVID_MASK		0x0000ffff + +#define TSI721_IBDMAC_CTL(x)		(0x61240 + (x) * 0x1000) +#define TSI721_IBDMAC_CTL_MASK		0x00000003 +#define TSI721_IBDMAC_CTL_SUSPEND	0x00000002 +#define TSI721_IBDMAC_CTL_INIT		0x00000001 + +#define TSI721_IBDMAC_STS(x)		(0x61244 + (x) * 0x1000) +#define TSI721_IBDMAC_STS_MASK		0x007f0000 +#define TSI721_IBSMAC_STS_ABORT		0x00400000 +#define TSI721_IBSMAC_STS_RUN		0x00200000 +#define TSI721_IBSMAC_STS_CS		0x001f0000 + +#define TSI721_IBDMAC_INT(x)		(0x61248 + (x) * 0x1000) +#define TSI721_IBDMAC_INTSET(x)		(0x6124c + (x) * 0x1000) +#define TSI721_IBDMAC_INTE(x)		(0x61250 + (x) * 0x1000) +#define TSI721_IBDMAC_INT_MASK		0x0000100f +#define TSI721_IBDMAC_INT_SRTO		0x00001000 +#define TSI721_IBDMAC_INT_SUSPENDED	0x00000008 +#define TSI721_IBDMAC_INT_PC_ERROR	0x00000004 +#define TSI721_IBDMAC_INT_FQ_LOW	0x00000002 +#define TSI721_IBDMAC_INT_DQ_RCV	0x00000001 +#define TSI721_IBDMAC_INT_ALL		TSI721_IBDMAC_INT_MASK + +#define TSI721_IBDMAC_PWE(x)		(0x61254 + (x) * 0x1000) +#define TSI721_IBDMAC_PWE_MASK		0x00001700 +#define TSI721_IBDMAC_PWE_SRTO		0x00001000 +#define TSI721_IBDMAC_PWE_ILL_FMT	0x00000400 +#define TSI721_IBDMAC_PWE_ILL_DEC	0x00000200 +#define TSI721_IBDMAC_PWE_IMP_SP	0x00000100 + +#define TSI721_IBDMAC_DQBL(x)		(0x61300 + (x) * 0x1000) +#define TSI721_IBDMAC_DQBL_MASK		0xffffffc0 +#define TSI721_IBDMAC_DQBL_ADDR		0xffffffc0 + +#define TSI721_IBDMAC_DQBH(x)		(0x61304 + (x) * 0x1000) +#define TSI721_IBDMAC_DQBH_MASK		0xffffffff + +#define TSI721_IBDMAC_DQRP(x)		(0x61308 + (x) * 0x1000) +#define TSI721_IBDMAC_DQRP_MASK		0x0007ffff + +#define TSI721_IBDMAC_DQWR(x)		(0x6130c + (x) * 0x1000) +#define TSI721_IBDMAC_DQWR_MASK		0x0007ffff + +#define TSI721_IBDMAC_DQSZ(x)		(0x61314 + (x) * 0x1000) +#define TSI721_IBDMAC_DQSZ_MASK		0x0000000f + +/* + * Messaging Engine Interrupts + */ + +#define TSI721_SMSG_PWE			0x6a004 + +#define TSI721_SMSG_INTE		0x6a000 +#define TSI721_SMSG_INT			0x6a008 +#define TSI721_SMSG_INTSET		0x6a010 +#define TSI721_SMSG_INT_MASK		0x0086ffff +#define TSI721_SMSG_INT_UNS_RSP		0x00800000 +#define TSI721_SMSG_INT_ECC_NCOR	0x00040000 +#define TSI721_SMSG_INT_ECC_COR		0x00020000 +#define TSI721_SMSG_INT_ECC_NCOR_CH	0x0000ff00 +#define TSI721_SMSG_INT_ECC_COR_CH	0x000000ff + +#define TSI721_SMSG_ECC_LOG		0x6a014 +#define TSI721_SMSG_ECC_LOG_MASK	0x00070007 +#define TSI721_SMSG_ECC_LOG_ECC_NCOR_M	0x00070000 +#define TSI721_SMSG_ECC_LOG_ECC_COR_M	
0x00000007 + +#define TSI721_RETRY_GEN_CNT		0x6a100 +#define TSI721_RETRY_GEN_CNT_MASK	0xffffffff + +#define TSI721_RETRY_RX_CNT		0x6a104 +#define TSI721_RETRY_RX_CNT_MASK	0xffffffff + +#define TSI721_SMSG_ECC_COR_LOG(x)	(0x6a300 + (x) * 4) +#define TSI721_SMSG_ECC_COR_LOG_MASK	0x000000ff + +#define TSI721_SMSG_ECC_NCOR(x)		(0x6a340 + (x) * 4) +#define TSI721_SMSG_ECC_NCOR_MASK	0x000000ff + +/* + * Block DMA Descriptors + */ + +struct tsi721_dma_desc { +	__le32 type_id; + +#define TSI721_DMAD_DEVID	0x0000ffff +#define TSI721_DMAD_CRF		0x00010000 +#define TSI721_DMAD_PRIO	0x00060000 +#define TSI721_DMAD_RTYPE	0x00780000 +#define TSI721_DMAD_IOF		0x08000000 +#define TSI721_DMAD_DTYPE	0xe0000000 + +	__le32 bcount; + +#define TSI721_DMAD_BCOUNT1	0x03ffffff /* if DTYPE == 1 */ +#define TSI721_DMAD_BCOUNT2	0x0000000f /* if DTYPE == 2 */ +#define TSI721_DMAD_TT		0x0c000000 +#define TSI721_DMAD_RADDR0	0xc0000000 + +	union { +		__le32 raddr_lo;	   /* if DTYPE == (1 || 2) */ +		__le32 next_lo;		   /* if DTYPE == 3 */ +	}; + +#define TSI721_DMAD_CFGOFF	0x00ffffff +#define TSI721_DMAD_HOPCNT	0xff000000 + +	union { +		__le32 raddr_hi;	   /* if DTYPE == (1 || 2) */ +		__le32 next_hi;		   /* if DTYPE == 3 */ +	}; + +	union { +		struct {		   /* if DTYPE == 1 */ +			__le32 bufptr_lo; +			__le32 bufptr_hi; +			__le32 s_dist; +			__le32 s_size; +		} t1; +		__le32 data[4];		   /* if DTYPE == 2 */ +		u32    reserved[4];	   /* if DTYPE == 3 */ +	}; +} __aligned(32); + +/* + * Inbound Messaging Descriptor + */ +struct tsi721_imsg_desc { +	__le32 type_id; + +#define TSI721_IMD_DEVID	0x0000ffff +#define TSI721_IMD_CRF		0x00010000 +#define TSI721_IMD_PRIO		0x00060000 +#define TSI721_IMD_TT		0x00180000 +#define TSI721_IMD_DTYPE	0xe0000000 + +	__le32 msg_info; + +#define TSI721_IMD_BCOUNT	0x00000ff8 +#define TSI721_IMD_SSIZE	0x0000f000 +#define TSI721_IMD_LETER	0x00030000 +#define TSI721_IMD_XMBOX	0x003c0000 +#define TSI721_IMD_MBOX		0x00c00000 +#define TSI721_IMD_CS		0x78000000 +#define TSI721_IMD_HO		0x80000000 + +	__le32 bufptr_lo; +	__le32 bufptr_hi; +	u32    reserved[12]; + +} __aligned(64); + +/* + * Outbound Messaging Descriptor + */ +struct tsi721_omsg_desc { +	__le32 type_id; + +#define TSI721_OMD_DEVID	0x0000ffff +#define TSI721_OMD_CRF		0x00010000 +#define TSI721_OMD_PRIO		0x00060000 +#define TSI721_OMD_IOF		0x08000000 +#define TSI721_OMD_DTYPE	0xe0000000 +#define TSI721_OMD_RSRVD	0x17f80000 + +	__le32 msg_info; + +#define TSI721_OMD_BCOUNT	0x00000ff8 +#define TSI721_OMD_SSIZE	0x0000f000 +#define TSI721_OMD_LETER	0x00030000 +#define TSI721_OMD_XMBOX	0x003c0000 +#define TSI721_OMD_MBOX		0x00c00000 +#define TSI721_OMD_TT		0x0c000000 + +	union { +		__le32 bufptr_lo;	/* if DTYPE == 4 */ +		__le32 next_lo;		/* if DTYPE == 5 */ +	}; + +	union { +		__le32 bufptr_hi;	/* if DTYPE == 4 */ +		__le32 next_hi;		/* if DTYPE == 5 */ +	}; + +} __aligned(16); + +struct tsi721_dma_sts { +	__le64	desc_sts[8]; +} __aligned(64); + +struct tsi721_desc_sts_fifo { +	union { +		__le64	da64; +		struct { +			__le32	lo; +			__le32	hi; +		} da32; +	} stat[8]; +} __aligned(64); + +/* Descriptor types for BDMA and Messaging blocks */ +enum dma_dtype { +	DTYPE1 = 1, /* Data Transfer DMA Descriptor */ +	DTYPE2 = 2, /* Immediate Data Transfer DMA Descriptor */ +	DTYPE3 = 3, /* Block Pointer DMA Descriptor */ +	DTYPE4 = 4, /* Outbound Msg DMA Descriptor */ +	DTYPE5 = 5, /* OB Messaging Block Pointer Descriptor */ +	DTYPE6 = 6  /* Inbound Messaging Descriptor */ +}; + +enum dma_rtype { +	NREAD = 0, +	LAST_NWRITE_R = 1, +	ALL_NWRITE = 2, +	
ALL_NWRITE_R = 3, +	MAINT_RD = 4, +	MAINT_WR = 5 +}; + +/* + * mport Driver Definitions + */ +#define TSI721_DMA_CHNUM	TSI721_DMA_MAXCH + +#define TSI721_DMACH_MAINT	0	/* DMA channel for maint requests */ +#define TSI721_DMACH_MAINT_NBD	32	/* Number of BDs for maint requests */ + +#define TSI721_DMACH_DMA	1	/* DMA channel for data transfers */ + +#define MSG_DMA_ENTRY_INX_TO_SIZE(x)	((0x10 << (x)) & 0xFFFF0) + +enum tsi721_smsg_int_flag { +	SMSG_INT_NONE		= 0x00000000, +	SMSG_INT_ECC_COR_CH	= 0x000000ff, +	SMSG_INT_ECC_NCOR_CH	= 0x0000ff00, +	SMSG_INT_ECC_COR	= 0x00020000, +	SMSG_INT_ECC_NCOR	= 0x00040000, +	SMSG_INT_UNS_RSP	= 0x00800000, +	SMSG_INT_ALL		= 0x0006ffff +}; + +/* Structures */ + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + +#define TSI721_BDMA_BD_RING_SZ	128 +#define TSI721_BDMA_MAX_BCOUNT	(TSI721_DMAD_BCOUNT1 + 1) + +struct tsi721_tx_desc { +	struct dma_async_tx_descriptor	txd; +	struct tsi721_dma_desc		*hw_desc; +	u16				destid; +	/* low 64-bits of 66-bit RIO address */ +	u64				rio_addr; +	/* upper 2-bits of 66-bit RIO address */ +	u8				rio_addr_u; +	u32				bcount; +	bool				interrupt; +	struct list_head		desc_node; +	struct list_head		tx_list; +}; + +struct tsi721_bdma_chan { +	int		id; +	void __iomem	*regs; +	int		bd_num;		/* number of buffer descriptors */ +	void		*bd_base;	/* start of DMA descriptors */ +	dma_addr_t	bd_phys; +	void		*sts_base;	/* start of DMA BD status FIFO */ +	dma_addr_t	sts_phys; +	int		sts_size; +	u32		sts_rdptr; +	u32		wr_count; +	u32		wr_count_next; + +	struct dma_chan		dchan; +	struct tsi721_tx_desc	*tx_desc; +	spinlock_t		lock; +	struct list_head	active_list; +	struct list_head	queue; +	struct list_head	free_list; +	dma_cookie_t		completed_cookie; +	struct tasklet_struct	tasklet; +	bool			active; +}; + +#endif /* CONFIG_RAPIDIO_DMA_ENGINE */ + +struct tsi721_bdma_maint { +	int		ch_id;		/* BDMA channel number */ +	int		bd_num;		/* number of buffer descriptors */ +	void		*bd_base;	/* start of DMA descriptors */ +	dma_addr_t	bd_phys; +	void		*sts_base;	/* start of DMA BD status FIFO */ +	dma_addr_t	sts_phys; +	int		sts_size; +}; + +struct tsi721_imsg_ring { +	u32		size; +	/* VA/PA of data buffers for incoming messages */ +	void		*buf_base; +	dma_addr_t	buf_phys; +	/* VA/PA of circular free buffer list */ +	void		*imfq_base; +	dma_addr_t	imfq_phys; +	/* VA/PA of Inbound message descriptors */ +	void		*imd_base; +	dma_addr_t	imd_phys; +	 /* Inbound Queue buffer pointers */ +	void		*imq_base[TSI721_IMSGD_RING_SIZE]; + +	u32		rx_slot; +	void		*dev_id; +	u32		fq_wrptr; +	u32		desc_rdptr; +	spinlock_t	lock; +}; + +struct tsi721_omsg_ring { +	u32		size; +	/* VA/PA of OB Msg descriptors */ +	void		*omd_base; +	dma_addr_t	omd_phys; +	/* VA/PA of OB Msg data buffers */ +	void		*omq_base[TSI721_OMSGD_RING_SIZE]; +	dma_addr_t	omq_phys[TSI721_OMSGD_RING_SIZE]; +	/* VA/PA of OB Msg descriptor status FIFO */ +	void		*sts_base; +	dma_addr_t	sts_phys; +	u32		sts_size; /* # of allocated status entries */ +	u32		sts_rdptr; + +	u32		tx_slot; +	void		*dev_id; +	u32		wr_count; +	spinlock_t	lock; +}; + +enum tsi721_flags { +	TSI721_USING_MSI	= (1 << 0), +	TSI721_USING_MSIX	= (1 << 1), +	TSI721_IMSGID_SET	= (1 << 2), +}; + +#ifdef CONFIG_PCI_MSI +/* + * MSI-X Table Entries (0 ... 
69) + */ +#define TSI721_MSIX_DMACH_DONE(x)	(0 + (x)) +#define TSI721_MSIX_DMACH_INT(x)	(8 + (x)) +#define TSI721_MSIX_BDMA_INT		16 +#define TSI721_MSIX_OMSG_DONE(x)	(17 + (x)) +#define TSI721_MSIX_OMSG_INT(x)		(25 + (x)) +#define TSI721_MSIX_IMSG_DQ_RCV(x)	(33 + (x)) +#define TSI721_MSIX_IMSG_INT(x)		(41 + (x)) +#define TSI721_MSIX_MSG_INT		49 +#define TSI721_MSIX_SR2PC_IDBQ_RCV(x)	(50 + (x)) +#define TSI721_MSIX_SR2PC_CH_INT(x)	(58 + (x)) +#define TSI721_MSIX_SR2PC_INT		66 +#define TSI721_MSIX_PC2SR_INT		67 +#define TSI721_MSIX_SRIO_MAC_INT	68 +#define TSI721_MSIX_I2C_INT		69 + +/* MSI-X vector and init table entry indexes */ +enum tsi721_msix_vect { +	TSI721_VECT_IDB, +	TSI721_VECT_PWRX, /* PW_RX is part of SRIO MAC Interrupt reporting */ +	TSI721_VECT_OMB0_DONE, +	TSI721_VECT_OMB1_DONE, +	TSI721_VECT_OMB2_DONE, +	TSI721_VECT_OMB3_DONE, +	TSI721_VECT_OMB0_INT, +	TSI721_VECT_OMB1_INT, +	TSI721_VECT_OMB2_INT, +	TSI721_VECT_OMB3_INT, +	TSI721_VECT_IMB0_RCV, +	TSI721_VECT_IMB1_RCV, +	TSI721_VECT_IMB2_RCV, +	TSI721_VECT_IMB3_RCV, +	TSI721_VECT_IMB0_INT, +	TSI721_VECT_IMB1_INT, +	TSI721_VECT_IMB2_INT, +	TSI721_VECT_IMB3_INT, +#ifdef CONFIG_RAPIDIO_DMA_ENGINE +	TSI721_VECT_DMA0_DONE, +	TSI721_VECT_DMA1_DONE, +	TSI721_VECT_DMA2_DONE, +	TSI721_VECT_DMA3_DONE, +	TSI721_VECT_DMA4_DONE, +	TSI721_VECT_DMA5_DONE, +	TSI721_VECT_DMA6_DONE, +	TSI721_VECT_DMA7_DONE, +	TSI721_VECT_DMA0_INT, +	TSI721_VECT_DMA1_INT, +	TSI721_VECT_DMA2_INT, +	TSI721_VECT_DMA3_INT, +	TSI721_VECT_DMA4_INT, +	TSI721_VECT_DMA5_INT, +	TSI721_VECT_DMA6_INT, +	TSI721_VECT_DMA7_INT, +#endif /* CONFIG_RAPIDIO_DMA_ENGINE */ +	TSI721_VECT_MAX +}; + +#define IRQ_DEVICE_NAME_MAX	64 + +struct msix_irq { +	u16	vector; +	char	irq_name[IRQ_DEVICE_NAME_MAX]; +}; +#endif /* CONFIG_PCI_MSI */ + +struct tsi721_device { +	struct pci_dev	*pdev; +	struct rio_mport *mport; +	u32		flags; +	void __iomem	*regs; +#ifdef CONFIG_PCI_MSI +	struct msix_irq	msix[TSI721_VECT_MAX]; +#endif +	/* Doorbells */ +	void __iomem	*odb_base; +	void		*idb_base; +	dma_addr_t	idb_dma; +	struct work_struct idb_work; +	u32		db_discard_count; + +	/* Inbound Port-Write */ +	struct work_struct pw_work; +	struct kfifo	pw_fifo; +	spinlock_t	pw_fifo_lock; +	u32		pw_discard_count; + +	/* BDMA Engine */ +	struct tsi721_bdma_maint mdma; /* Maintenance rd/wr request channel */ + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE +	struct tsi721_bdma_chan bdma[TSI721_DMA_CHNUM]; +#endif + +	/* Inbound Messaging */ +	int		imsg_init[TSI721_IMSG_CHNUM]; +	struct tsi721_imsg_ring imsg_ring[TSI721_IMSG_CHNUM]; + +	/* Outbound Messaging */ +	int		omsg_init[TSI721_OMSG_CHNUM]; +	struct tsi721_omsg_ring	omsg_ring[TSI721_OMSG_CHNUM]; +}; + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE +extern void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan); +extern int tsi721_register_dma(struct tsi721_device *priv); +#endif + +#endif diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c new file mode 100644 index 00000000000..44341dc5b14 --- /dev/null +++ b/drivers/rapidio/devices/tsi721_dma.c @@ -0,0 +1,883 @@ +/* + * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge + * + * Copyright 2011 Integrated Device Technology, Inc. + * Alexandre Bounine <alexandre.bounine@idt.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA  02111-1307, USA. + */ + +#include <linux/io.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/rio.h> +#include <linux/rio_drv.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/kfifo.h> +#include <linux/delay.h> + +#include "tsi721.h" + +static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan) +{ +	return container_of(chan, struct tsi721_bdma_chan, dchan); +} + +static inline struct tsi721_device *to_tsi721(struct dma_device *ddev) +{ +	return container_of(ddev, struct rio_mport, dma)->priv; +} + +static inline +struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd) +{ +	return container_of(txd, struct tsi721_tx_desc, txd); +} + +static inline +struct tsi721_tx_desc *tsi721_dma_first_active( +				struct tsi721_bdma_chan *bdma_chan) +{ +	return list_first_entry(&bdma_chan->active_list, +				struct tsi721_tx_desc, desc_node); +} + +static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan) +{ +	struct tsi721_dma_desc *bd_ptr; +	struct device *dev = bdma_chan->dchan.device->dev; +	u64		*sts_ptr; +	dma_addr_t	bd_phys; +	dma_addr_t	sts_phys; +	int		sts_size; +	int		bd_num = bdma_chan->bd_num; + +	dev_dbg(dev, "Init Block DMA Engine, CH%d\n", bdma_chan->id); + +	/* Allocate space for DMA descriptors */ +	bd_ptr = dma_zalloc_coherent(dev, +				bd_num * sizeof(struct tsi721_dma_desc), +				&bd_phys, GFP_KERNEL); +	if (!bd_ptr) +		return -ENOMEM; + +	bdma_chan->bd_phys = bd_phys; +	bdma_chan->bd_base = bd_ptr; + +	dev_dbg(dev, "DMA descriptors @ %p (phys = %llx)\n", +		bd_ptr, (unsigned long long)bd_phys); + +	/* Allocate space for descriptor status FIFO */ +	sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ? 
+					bd_num : TSI721_DMA_MINSTSSZ; +	sts_size = roundup_pow_of_two(sts_size); +	sts_ptr = dma_zalloc_coherent(dev, +				     sts_size * sizeof(struct tsi721_dma_sts), +				     &sts_phys, GFP_KERNEL); +	if (!sts_ptr) { +		/* Free space allocated for DMA descriptors */ +		dma_free_coherent(dev, +				  bd_num * sizeof(struct tsi721_dma_desc), +				  bd_ptr, bd_phys); +		bdma_chan->bd_base = NULL; +		return -ENOMEM; +	} + +	bdma_chan->sts_phys = sts_phys; +	bdma_chan->sts_base = sts_ptr; +	bdma_chan->sts_size = sts_size; + +	dev_dbg(dev, +		"desc status FIFO @ %p (phys = %llx) size=0x%x\n", +		sts_ptr, (unsigned long long)sts_phys, sts_size); + +	/* Initialize DMA descriptors ring */ +	bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29); +	bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys & +						 TSI721_DMAC_DPTRL_MASK); +	bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32); + +	/* Setup DMA descriptor pointers */ +	iowrite32(((u64)bd_phys >> 32), +		bdma_chan->regs + TSI721_DMAC_DPTRH); +	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK), +		bdma_chan->regs + TSI721_DMAC_DPTRL); + +	/* Setup descriptor status FIFO */ +	iowrite32(((u64)sts_phys >> 32), +		bdma_chan->regs + TSI721_DMAC_DSBH); +	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK), +		bdma_chan->regs + TSI721_DMAC_DSBL); +	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size), +		bdma_chan->regs + TSI721_DMAC_DSSZ); + +	/* Clear interrupt bits */ +	iowrite32(TSI721_DMAC_INT_ALL, +		bdma_chan->regs + TSI721_DMAC_INT); + +	ioread32(bdma_chan->regs + TSI721_DMAC_INT); + +	/* Toggle DMA channel initialization */ +	iowrite32(TSI721_DMAC_CTL_INIT,	bdma_chan->regs + TSI721_DMAC_CTL); +	ioread32(bdma_chan->regs + TSI721_DMAC_CTL); +	bdma_chan->wr_count = bdma_chan->wr_count_next = 0; +	bdma_chan->sts_rdptr = 0; +	udelay(10); + +	return 0; +} + +static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan) +{ +	u32 ch_stat; + +	if (bdma_chan->bd_base == NULL) +		return 0; + +	/* Check if DMA channel still running */ +	ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS); +	if (ch_stat & TSI721_DMAC_STS_RUN) +		return -EFAULT; + +	/* Put DMA channel into init state */ +	iowrite32(TSI721_DMAC_CTL_INIT,	bdma_chan->regs + TSI721_DMAC_CTL); + +	/* Free space allocated for DMA descriptors */ +	dma_free_coherent(bdma_chan->dchan.device->dev, +		bdma_chan->bd_num * sizeof(struct tsi721_dma_desc), +		bdma_chan->bd_base, bdma_chan->bd_phys); +	bdma_chan->bd_base = NULL; + +	/* Free space allocated for status FIFO */ +	dma_free_coherent(bdma_chan->dchan.device->dev, +		bdma_chan->sts_size * sizeof(struct tsi721_dma_sts), +		bdma_chan->sts_base, bdma_chan->sts_phys); +	bdma_chan->sts_base = NULL; +	return 0; +} + +static void +tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable) +{ +	if (enable) { +		/* Clear pending BDMA channel interrupts */ +		iowrite32(TSI721_DMAC_INT_ALL, +			bdma_chan->regs + TSI721_DMAC_INT); +		ioread32(bdma_chan->regs + TSI721_DMAC_INT); +		/* Enable BDMA channel interrupts */ +		iowrite32(TSI721_DMAC_INT_ALL, +			bdma_chan->regs + TSI721_DMAC_INTE); +	} else { +		/* Disable BDMA channel interrupts */ +		iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE); +		/* Clear pending BDMA channel interrupts */ +		iowrite32(TSI721_DMAC_INT_ALL, +			bdma_chan->regs + TSI721_DMAC_INT); +	} + +} + +static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan) +{ +	u32 sts; + +	sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS); +	return ((sts & TSI721_DMAC_STS_RUN) == 0); +} + +void 
tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
+{
+	/* Disable BDMA channel interrupts */
+	iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
+	if (bdma_chan->active)
+		tasklet_schedule(&bdma_chan->tasklet);
+}
+
+#ifdef CONFIG_PCI_MSI
+/**
+ * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
+ * @irq: Linux interrupt number
+ * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
+ *
+ * Handles BDMA channel interrupts signaled using MSI-X.
+ */
+static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
+{
+	struct tsi721_bdma_chan *bdma_chan = ptr;
+
+	tsi721_bdma_handler(bdma_chan);
+	return IRQ_HANDLED;
+}
+#endif /* CONFIG_PCI_MSI */
+
+/* Must be called with the spinlock held */
+static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
+{
+	if (!tsi721_dma_is_idle(bdma_chan)) {
+		dev_err(bdma_chan->dchan.device->dev,
+			"BUG: Attempt to start non-idle channel\n");
+		return;
+	}
+
+	if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
+		dev_err(bdma_chan->dchan.device->dev,
+			"BUG: Attempt to start DMA with no BDs ready\n");
+		return;
+	}
+
+	dev_dbg(bdma_chan->dchan.device->dev,
+		"tx_chan: %p, chan: %d, regs: %p\n",
+		bdma_chan, bdma_chan->dchan.chan_id, bdma_chan->regs);
+
+	iowrite32(bdma_chan->wr_count_next,
+		bdma_chan->regs + TSI721_DMAC_DWRCNT);
+	ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);
+
+	bdma_chan->wr_count = bdma_chan->wr_count_next;
+}
+
+static void tsi721_desc_put(struct tsi721_bdma_chan *bdma_chan,
+			    struct tsi721_tx_desc *desc)
+{
+	dev_dbg(bdma_chan->dchan.device->dev,
+		"Put desc: %p into free list\n", desc);
+
+	if (desc) {
+		spin_lock_bh(&bdma_chan->lock);
+		list_splice_init(&desc->tx_list, &bdma_chan->free_list);
+		list_add(&desc->desc_node, &bdma_chan->free_list);
+		bdma_chan->wr_count_next = bdma_chan->wr_count;
+		spin_unlock_bh(&bdma_chan->lock);
+	}
+}
+
+static
+struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
+{
+	struct tsi721_tx_desc *tx_desc, *_tx_desc;
+	struct tsi721_tx_desc *ret = NULL;
+	int i;
+
+	spin_lock_bh(&bdma_chan->lock);
+	list_for_each_entry_safe(tx_desc, _tx_desc,
+				 &bdma_chan->free_list, desc_node) {
+		if (async_tx_test_ack(&tx_desc->txd)) {
+			list_del(&tx_desc->desc_node);
+			ret = tx_desc;
+			break;
+		}
+		dev_dbg(bdma_chan->dchan.device->dev,
+			"desc %p not ACKed\n", tx_desc);
+	}
+
+	if (ret == NULL) {
+		dev_dbg(bdma_chan->dchan.device->dev,
+			"%s: unable to obtain tx descriptor\n", __func__);
+		goto err_out;
+	}
+
+	i = bdma_chan->wr_count_next % bdma_chan->bd_num;
+	if (i == bdma_chan->bd_num - 1) {
+		i = 0;
+		bdma_chan->wr_count_next++; /* skip link descriptor */
+	}
+
+	bdma_chan->wr_count_next++;
+	tx_desc->txd.phys = bdma_chan->bd_phys +
+				i * sizeof(struct tsi721_dma_desc);
+	tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
+err_out:
+	spin_unlock_bh(&bdma_chan->lock);
+
+	return ret;
+}
+
+static int
+tsi721_desc_fill_init(struct tsi721_tx_desc *desc, struct scatterlist *sg,
+	enum dma_rtype rtype, u32 sys_size)
+{
+	struct tsi721_dma_desc *bd_ptr = desc->hw_desc;
+	u64 rio_addr;
+
+	/* Initialize DMA descriptor */
+	bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
+					(rtype << 19) | desc->destid);
+	bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
+				     (sys_size << 26));
+	rio_addr = (desc->rio_addr >> 2) |
+				((u64)(desc->rio_addr_u & 0x3) << 62);
+	bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
+	bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
+	
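+	/*
+	 * Editor's note (not part of the original patch): raddr_lo/raddr_hi
+	 * hold bits 65:2 of the 66-bit RapidIO address; its two low-order
+	 * bits were folded into bcount (the TSI721_DMAD_RADDR0 field) above.
+	 * For example, desc->rio_addr = 0x100000007ULL with rio_addr_u = 0
+	 * gives bcount bits 31:30 = 0x3, raddr_lo = 0x40000001 and
+	 * raddr_hi = 0.
+	 */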
bd_ptr->t1.bufptr_lo = cpu_to_le32( +					(u64)sg_dma_address(sg) & 0xffffffff); +	bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32); +	bd_ptr->t1.s_dist = 0; +	bd_ptr->t1.s_size = 0; + +	return 0; +} + +static int +tsi721_desc_fill_end(struct tsi721_tx_desc *desc) +{ +	struct tsi721_dma_desc *bd_ptr = desc->hw_desc; + +	/* Update DMA descriptor */ +	if (desc->interrupt) +		bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF); +	bd_ptr->bcount |= cpu_to_le32(desc->bcount & TSI721_DMAD_BCOUNT1); + +	return 0; +} + + +static void tsi721_dma_chain_complete(struct tsi721_bdma_chan *bdma_chan, +				      struct tsi721_tx_desc *desc) +{ +	struct dma_async_tx_descriptor *txd = &desc->txd; +	dma_async_tx_callback callback = txd->callback; +	void *param = txd->callback_param; + +	list_splice_init(&desc->tx_list, &bdma_chan->free_list); +	list_move(&desc->desc_node, &bdma_chan->free_list); +	bdma_chan->completed_cookie = txd->cookie; + +	if (callback) +		callback(param); +} + +static void tsi721_dma_complete_all(struct tsi721_bdma_chan *bdma_chan) +{ +	struct tsi721_tx_desc *desc, *_d; +	LIST_HEAD(list); + +	BUG_ON(!tsi721_dma_is_idle(bdma_chan)); + +	if (!list_empty(&bdma_chan->queue)) +		tsi721_start_dma(bdma_chan); + +	list_splice_init(&bdma_chan->active_list, &list); +	list_splice_init(&bdma_chan->queue, &bdma_chan->active_list); + +	list_for_each_entry_safe(desc, _d, &list, desc_node) +		tsi721_dma_chain_complete(bdma_chan, desc); +} + +static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan) +{ +	u32 srd_ptr; +	u64 *sts_ptr; +	int i, j; + +	/* Check and clear descriptor status FIFO entries */ +	srd_ptr = bdma_chan->sts_rdptr; +	sts_ptr = bdma_chan->sts_base; +	j = srd_ptr * 8; +	while (sts_ptr[j]) { +		for (i = 0; i < 8 && sts_ptr[j]; i++, j++) +			sts_ptr[j] = 0; + +		++srd_ptr; +		srd_ptr %= bdma_chan->sts_size; +		j = srd_ptr * 8; +	} + +	iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP); +	bdma_chan->sts_rdptr = srd_ptr; +} + +static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan) +{ +	if (list_empty(&bdma_chan->active_list) || +		list_is_singular(&bdma_chan->active_list)) { +		dev_dbg(bdma_chan->dchan.device->dev, +			"%s: Active_list empty\n", __func__); +		tsi721_dma_complete_all(bdma_chan); +	} else { +		dev_dbg(bdma_chan->dchan.device->dev, +			"%s: Active_list NOT empty\n", __func__); +		tsi721_dma_chain_complete(bdma_chan, +					tsi721_dma_first_active(bdma_chan)); +		tsi721_start_dma(bdma_chan); +	} +} + +static void tsi721_dma_tasklet(unsigned long data) +{ +	struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data; +	u32 dmac_int, dmac_sts; + +	dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT); +	dev_dbg(bdma_chan->dchan.device->dev, "%s: DMAC%d_INT = 0x%x\n", +		__func__, bdma_chan->id, dmac_int); +	/* Clear channel interrupts */ +	iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT); + +	if (dmac_int & TSI721_DMAC_INT_ERR) { +		dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS); +		dev_err(bdma_chan->dchan.device->dev, +			"%s: DMA ERROR - DMAC%d_STS = 0x%x\n", +			__func__, bdma_chan->id, dmac_sts); +	} + +	if (dmac_int & TSI721_DMAC_INT_STFULL) { +		dev_err(bdma_chan->dchan.device->dev, +			"%s: DMAC%d descriptor status FIFO is full\n", +			__func__, bdma_chan->id); +	} + +	if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) { +		tsi721_clr_stat(bdma_chan); +		spin_lock(&bdma_chan->lock); +		tsi721_advance_work(bdma_chan); +		spin_unlock(&bdma_chan->lock); +	} + +	/* Re-Enable BDMA channel interrupts */ +	
iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE); +} + +static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd) +{ +	struct tsi721_tx_desc *desc = to_tsi721_desc(txd); +	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan); +	dma_cookie_t cookie; + +	spin_lock_bh(&bdma_chan->lock); + +	cookie = txd->chan->cookie; +	if (++cookie < 0) +		cookie = 1; +	txd->chan->cookie = cookie; +	txd->cookie = cookie; + +	if (list_empty(&bdma_chan->active_list)) { +		list_add_tail(&desc->desc_node, &bdma_chan->active_list); +		tsi721_start_dma(bdma_chan); +	} else { +		list_add_tail(&desc->desc_node, &bdma_chan->queue); +	} + +	spin_unlock_bh(&bdma_chan->lock); +	return cookie; +} + +static int tsi721_alloc_chan_resources(struct dma_chan *dchan) +{ +	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); +#ifdef CONFIG_PCI_MSI +	struct tsi721_device *priv = to_tsi721(dchan->device); +#endif +	struct tsi721_tx_desc *desc = NULL; +	LIST_HEAD(tmp_list); +	int i; +	int rc; + +	if (bdma_chan->bd_base) +		return bdma_chan->bd_num - 1; + +	/* Initialize BDMA channel */ +	if (tsi721_bdma_ch_init(bdma_chan)) { +		dev_err(dchan->device->dev, "Unable to initialize data DMA" +			" channel %d, aborting\n", bdma_chan->id); +		return -ENOMEM; +	} + +	/* Allocate matching number of logical descriptors */ +	desc = kcalloc((bdma_chan->bd_num - 1), sizeof(struct tsi721_tx_desc), +			GFP_KERNEL); +	if (!desc) { +		dev_err(dchan->device->dev, +			"Failed to allocate logical descriptors\n"); +		rc = -ENOMEM; +		goto err_out; +	} + +	bdma_chan->tx_desc = desc; + +	for (i = 0; i < bdma_chan->bd_num - 1; i++) { +		dma_async_tx_descriptor_init(&desc[i].txd, dchan); +		desc[i].txd.tx_submit = tsi721_tx_submit; +		desc[i].txd.flags = DMA_CTRL_ACK; +		INIT_LIST_HEAD(&desc[i].tx_list); +		list_add_tail(&desc[i].desc_node, &tmp_list); +	} + +	spin_lock_bh(&bdma_chan->lock); +	list_splice(&tmp_list, &bdma_chan->free_list); +	bdma_chan->completed_cookie = dchan->cookie = 1; +	spin_unlock_bh(&bdma_chan->lock); + +#ifdef CONFIG_PCI_MSI +	if (priv->flags & TSI721_USING_MSIX) { +		/* Request interrupt service if we are in MSI-X mode */ +		rc = request_irq( +			priv->msix[TSI721_VECT_DMA0_DONE + +				   bdma_chan->id].vector, +			tsi721_bdma_msix, 0, +			priv->msix[TSI721_VECT_DMA0_DONE + +				   bdma_chan->id].irq_name, +			(void *)bdma_chan); + +		if (rc) { +			dev_dbg(dchan->device->dev, +				"Unable to allocate MSI-X interrupt for " +				"BDMA%d-DONE\n", bdma_chan->id); +			goto err_out; +		} + +		rc = request_irq(priv->msix[TSI721_VECT_DMA0_INT + +					    bdma_chan->id].vector, +				tsi721_bdma_msix, 0, +				priv->msix[TSI721_VECT_DMA0_INT + +					   bdma_chan->id].irq_name, +				(void *)bdma_chan); + +		if (rc)	{ +			dev_dbg(dchan->device->dev, +				"Unable to allocate MSI-X interrupt for " +				"BDMA%d-INT\n", bdma_chan->id); +			free_irq( +				priv->msix[TSI721_VECT_DMA0_DONE + +					   bdma_chan->id].vector, +				(void *)bdma_chan); +			rc = -EIO; +			goto err_out; +		} +	} +#endif /* CONFIG_PCI_MSI */ + +	bdma_chan->active = true; +	tsi721_bdma_interrupt_enable(bdma_chan, 1); + +	return bdma_chan->bd_num - 1; + +err_out: +	kfree(desc); +	tsi721_bdma_ch_free(bdma_chan); +	return rc; +} + +static void tsi721_free_chan_resources(struct dma_chan *dchan) +{ +	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); +	struct tsi721_device *priv = to_tsi721(dchan->device); +	LIST_HEAD(list); + +	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__); + +	if (bdma_chan->bd_base == NULL) +
return; + +	BUG_ON(!list_empty(&bdma_chan->active_list)); +	BUG_ON(!list_empty(&bdma_chan->queue)); + +	tsi721_bdma_interrupt_enable(bdma_chan, 0); +	bdma_chan->active = false; + +#ifdef CONFIG_PCI_MSI +	if (priv->flags & TSI721_USING_MSIX) { +		synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE + +					   bdma_chan->id].vector); +		synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT + +					   bdma_chan->id].vector); +	} else +#endif +	synchronize_irq(priv->pdev->irq); + +	tasklet_kill(&bdma_chan->tasklet); + +	spin_lock_bh(&bdma_chan->lock); +	list_splice_init(&bdma_chan->free_list, &list); +	spin_unlock_bh(&bdma_chan->lock); + +#ifdef CONFIG_PCI_MSI +	if (priv->flags & TSI721_USING_MSIX) { +		free_irq(priv->msix[TSI721_VECT_DMA0_DONE + +				    bdma_chan->id].vector, (void *)bdma_chan); +		free_irq(priv->msix[TSI721_VECT_DMA0_INT + +				    bdma_chan->id].vector, (void *)bdma_chan); +	} +#endif /* CONFIG_PCI_MSI */ + +	tsi721_bdma_ch_free(bdma_chan); +	kfree(bdma_chan->tx_desc); +} + +static +enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, +				 struct dma_tx_state *txstate) +{ +	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); +	dma_cookie_t		last_used; +	dma_cookie_t		last_completed; +	int			ret; + +	spin_lock_bh(&bdma_chan->lock); +	last_completed = bdma_chan->completed_cookie; +	last_used = dchan->cookie; +	spin_unlock_bh(&bdma_chan->lock); + +	ret = dma_async_is_complete(cookie, last_completed, last_used); + +	dma_set_tx_state(txstate, last_completed, last_used, 0); + +	dev_dbg(dchan->device->dev, +		"%s: exit, ret: %d, last_completed: %d, last_used: %d\n", +		__func__, ret, last_completed, last_used); + +	return ret; +} + +static void tsi721_issue_pending(struct dma_chan *dchan) +{ +	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); + +	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__); + +	if (tsi721_dma_is_idle(bdma_chan)) { +		spin_lock_bh(&bdma_chan->lock); +		tsi721_advance_work(bdma_chan); +		spin_unlock_bh(&bdma_chan->lock); +	} else +		dev_dbg(dchan->device->dev, +			"%s: DMA channel still busy\n", __func__); +} + +static +struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan, +			struct scatterlist *sgl, unsigned int sg_len, +			enum dma_transfer_direction dir, unsigned long flags, +			void *tinfo) +{ +	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); +	struct tsi721_tx_desc *desc = NULL; +	struct tsi721_tx_desc *first = NULL; +	struct scatterlist *sg; +	struct rio_dma_ext *rext = tinfo; +	u64 rio_addr = rext->rio_addr; /* limited to 64-bit rio_addr for now */ +	unsigned int i; +	u32 sys_size = dma_to_mport(dchan->device)->sys_size; +	enum dma_rtype rtype; +	dma_addr_t next_addr = -1; + +	if (!sgl || !sg_len) { +		dev_err(dchan->device->dev, "%s: No SG list\n", __func__); +		return NULL; +	} + +	if (dir == DMA_DEV_TO_MEM) +		rtype = NREAD; +	else if (dir == DMA_MEM_TO_DEV) { +		switch (rext->wr_type) { +		case RDW_ALL_NWRITE: +			rtype = ALL_NWRITE; +			break; +		case RDW_ALL_NWRITE_R: +			rtype = ALL_NWRITE_R; +			break; +		case RDW_LAST_NWRITE_R: +		default: +			rtype = LAST_NWRITE_R; +			break; +		} +	} else { +		dev_err(dchan->device->dev, +			"%s: Unsupported DMA direction option\n", __func__); +		return NULL; +	} + +	for_each_sg(sgl, sg, sg_len, i) { +		int err; + +		if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) { +			dev_err(dchan->device->dev, +				"%s: SG entry %d is too large\n", __func__, i); +			goto err_desc_put; +		} + +		/* +		 * If this sg entry forms contiguous block with 
previous one, +		 * try to merge it into existing DMA descriptor +		 */ +		if (desc) { +			if (next_addr == sg_dma_address(sg) && +			    desc->bcount + sg_dma_len(sg) <= +						TSI721_BDMA_MAX_BCOUNT) { +				/* Adjust byte count of the descriptor */ +				desc->bcount += sg_dma_len(sg); +				goto entry_done; +			} + +			/* +			 * Finalize this descriptor using total +			 * byte count value. +			 */ +			tsi721_desc_fill_end(desc); +			dev_dbg(dchan->device->dev, "%s: desc final len: %d\n", +				__func__, desc->bcount); +		} + +		/* +		 * Obtain and initialize a new descriptor +		 */ +		desc = tsi721_desc_get(bdma_chan); +		if (!desc) { +			dev_err(dchan->device->dev, +				"%s: Failed to get new descriptor for SG %d\n", +				__func__, i); +			goto err_desc_put; +		} + +		desc->destid = rext->destid; +		desc->rio_addr = rio_addr; +		desc->rio_addr_u = 0; +		desc->bcount = sg_dma_len(sg); + +		dev_dbg(dchan->device->dev, +			"sg%d desc: 0x%llx, addr: 0x%llx len: %d\n", +			i, (u64)desc->txd.phys, +			(unsigned long long)sg_dma_address(sg), +			sg_dma_len(sg)); + +		dev_dbg(dchan->device->dev, +			"bd_ptr = %p did=%d raddr=0x%llx\n", +			desc->hw_desc, desc->destid, desc->rio_addr); + +		err = tsi721_desc_fill_init(desc, sg, rtype, sys_size); +		if (err) { +			dev_err(dchan->device->dev, +				"Failed to build desc: %d\n", err); +			goto err_desc_put; +		} + +		next_addr = sg_dma_address(sg); + +		if (!first) +			first = desc; +		else +			list_add_tail(&desc->desc_node, &first->tx_list); + +entry_done: +		if (sg_is_last(sg)) { +			desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0; +			tsi721_desc_fill_end(desc); +			dev_dbg(dchan->device->dev, "%s: desc final len: %d\n", +				__func__, desc->bcount); +		} else { +			rio_addr += sg_dma_len(sg); +			next_addr += sg_dma_len(sg); +		} +	} + +	first->txd.cookie = -EBUSY; +	desc->txd.flags = flags; + +	return &first->txd; + +err_desc_put: +	tsi721_desc_put(bdma_chan, first); +	return NULL; +} + +static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, +			     unsigned long arg) +{ +	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); +	struct tsi721_tx_desc *desc, *_d; +	LIST_HEAD(list); + +	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__); + +	if (cmd != DMA_TERMINATE_ALL) +		return -ENXIO; + +	spin_lock_bh(&bdma_chan->lock); + +	/* make sure to stop the transfer */ +	iowrite32(TSI721_DMAC_CTL_SUSP, bdma_chan->regs + TSI721_DMAC_CTL); + +	list_splice_init(&bdma_chan->active_list, &list); +	list_splice_init(&bdma_chan->queue, &list); + +	list_for_each_entry_safe(desc, _d, &list, desc_node) +		tsi721_dma_chain_complete(bdma_chan, desc); + +	spin_unlock_bh(&bdma_chan->lock); + +	return 0; +} + +int tsi721_register_dma(struct tsi721_device *priv) +{ +	int i; +	int nr_channels = TSI721_DMA_MAXCH; +	int err; +	struct rio_mport *mport = priv->mport; + +	mport->dma.dev = &priv->pdev->dev; +	mport->dma.chancnt = nr_channels; + +	INIT_LIST_HEAD(&mport->dma.channels); + +	for (i = 0; i < nr_channels; i++) { +		struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i]; + +		if (i == TSI721_DMACH_MAINT) +			continue; + +		bdma_chan->bd_num = TSI721_BDMA_BD_RING_SZ; +		bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i); + +		bdma_chan->dchan.device = &mport->dma; +		bdma_chan->dchan.cookie = 1; +		bdma_chan->dchan.chan_id = i; +		bdma_chan->id = i; +		bdma_chan->active = false; + +		spin_lock_init(&bdma_chan->lock); + +		INIT_LIST_HEAD(&bdma_chan->active_list); +		INIT_LIST_HEAD(&bdma_chan->queue); +		
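The loop in tsi721_prep_rio_sg() above folds a scatterlist entry into the current hardware descriptor whenever it starts exactly where the previous entry ended and the combined byte count stays within the per-descriptor limit. The same decision in a self-contained sketch (the 4 MB cap is a stand-in for TSI721_BDMA_MAX_BCOUNT, not its actual value):

#include <stddef.h>
#include <stdint.h>

#define MAX_BCOUNT (4u * 1024 * 1024)	/* illustrative cap only */

struct seg { uint64_t addr; uint32_t len; };

/* Return how many descriptors a segment list would consume when
 * physically contiguous segments are coalesced. */
static size_t count_descriptors(const struct seg *sg, size_t n)
{
	size_t used = 0;
	uint64_t next_addr = 0;
	uint32_t bcount = 0;

	for (size_t i = 0; i < n; i++) {
		if (used && sg[i].addr == next_addr &&
		    bcount + sg[i].len <= MAX_BCOUNT) {
			bcount += sg[i].len;	/* merge into current desc */
		} else {
			used++;			/* open a new descriptor */
			bcount = sg[i].len;
		}
		next_addr = sg[i].addr + sg[i].len;
	}
	return used;
}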
INIT_LIST_HEAD(&bdma_chan->free_list); + +		tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet, +			     (unsigned long)bdma_chan); +		list_add_tail(&bdma_chan->dchan.device_node, +			      &mport->dma.channels); +	} + +	dma_cap_zero(mport->dma.cap_mask); +	dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask); +	dma_cap_set(DMA_SLAVE, mport->dma.cap_mask); + +	mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources; +	mport->dma.device_free_chan_resources = tsi721_free_chan_resources; +	mport->dma.device_tx_status = tsi721_tx_status; +	mport->dma.device_issue_pending = tsi721_issue_pending; +	mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg; +	mport->dma.device_control = tsi721_device_control; + +	err = dma_async_device_register(&mport->dma); +	if (err) +		dev_err(&priv->pdev->dev, "Failed to register DMA device\n"); + +	return err; +} diff --git a/drivers/rapidio/rio-driver.c b/drivers/rapidio/rio-driver.c index 0f4a53bdaa3..f301f059bb8 100644 --- a/drivers/rapidio/rio-driver.c +++ b/drivers/rapidio/rio-driver.c @@ -164,6 +164,12 @@ void rio_unregister_driver(struct rio_driver *rdrv)  	driver_unregister(&rdrv->driver);  } +void rio_attach_device(struct rio_dev *rdev) +{ +	rdev->dev.bus = &rio_bus_type; +} +EXPORT_SYMBOL_GPL(rio_attach_device); +  /**   *  rio_match_bus - Tell if a RIO device structure has a matching RIO driver device id structure   *  @dev: the standard device structure to match against @@ -192,29 +198,57 @@ static int rio_match_bus(struct device *dev, struct device_driver *drv)        out:return 0;  } -struct device rio_bus = { -	.init_name = "rapidio", +static int rio_uevent(struct device *dev, struct kobj_uevent_env *env) +{ +	struct rio_dev *rdev; + +	if (!dev) +		return -ENODEV; + +	rdev = to_rio_dev(dev); +	if (!rdev) +		return -ENODEV; + +	if (add_uevent_var(env, "MODALIAS=rapidio:v%04Xd%04Xav%04Xad%04X", +			   rdev->vid, rdev->did, rdev->asm_vid, rdev->asm_did)) +		return -ENOMEM; +	return 0; +} + +struct class rio_mport_class = { +	.name		= "rapidio_port", +	.owner		= THIS_MODULE, +	.dev_groups	= rio_mport_groups,  }; +EXPORT_SYMBOL_GPL(rio_mport_class);  struct bus_type rio_bus_type = {  	.name = "rapidio",  	.match = rio_match_bus, -	.dev_attrs = rio_dev_attrs, +	.dev_groups = rio_dev_groups, +	.bus_groups = rio_bus_groups,  	.probe = rio_device_probe,  	.remove = rio_device_remove, +	.uevent	= rio_uevent,  };  /**   *  rio_bus_init - Register the RapidIO bus with the device model   * - *  Registers the RIO bus device and RIO bus type with the Linux + *  Registers the RIO mport device class and RIO bus type with the Linux   *  device model.   
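The new rio_uevent() callback above emits MODALIAS=rapidio:v<vid>d<did>av<asm_vid>ad<asm_did>, the string udev hands to modprobe. The driver-side counterpart is an ID table; a sketch with made-up IDs, assuming the matching rapidio alias support in file2alias from this same patch series (the RIO_DEVICE() initializer takes the device ID first, then the vendor ID):

#include <linux/module.h>
#include <linux/rio.h>

/* Hypothetical match table; 0x5a5a/0x0038 are example values only. */
static const struct rio_device_id example_id_table[] = {
	{ RIO_DEVICE(0x5a5a, 0x0038) },	/* did, vid */
	{ 0, }				/* terminating entry */
};
MODULE_DEVICE_TABLE(rapidio, example_id_table);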
*/  static int __init rio_bus_init(void)  { -	if (device_register(&rio_bus) < 0) -		printk("RIO: failed to register RIO bus device\n"); -	return bus_register(&rio_bus_type); +	int ret; + +	ret = class_register(&rio_mport_class); +	if (!ret) { +		ret = bus_register(&rio_bus_type); +		if (ret) +			class_unregister(&rio_mport_class); +	} +	return ret;  }  postcore_initcall(rio_bus_init); diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c index 1eb82c4c712..47a1b2ea76c 100644 --- a/drivers/rapidio/rio-scan.c +++ b/drivers/rapidio/rio-scan.c @@ -31,28 +31,17 @@  #include <linux/module.h>  #include <linux/spinlock.h>  #include <linux/timer.h> +#include <linux/sched.h>  #include <linux/jiffies.h>  #include <linux/slab.h>  #include "rio.h" -LIST_HEAD(rio_devices); -static LIST_HEAD(rio_switches); - -static void rio_enum_timeout(unsigned long); -  static void rio_init_em(struct rio_dev *rdev); -DEFINE_SPINLOCK(rio_global_list_lock); -  static int next_destid = 0; -static int next_switchid = 0; -static int next_net = 0;  static int next_comptag = 1; -static struct timer_list rio_enum_timer = -TIMER_INITIALIZER(rio_enum_timeout, 0, 0); -  static int rio_mport_phys_table[] = {  	RIO_EFB_PAR_EP_ID,  	RIO_EFB_PAR_EP_REC_ID, @@ -61,6 +50,109 @@ static int rio_mport_phys_table[] = {  	-1,  }; + +/** + * rio_destid_alloc - Allocate next available destID for given network + * @net: RIO network + * + * Returns next available device destination ID for the specified RIO network. + * Marks the allocated ID as in use. + * Returns RIO_INVALID_DESTID if new destID is not available. + */ +static u16 rio_destid_alloc(struct rio_net *net) +{ +	int destid; +	struct rio_id_table *idtab = &net->destid_table; + +	spin_lock(&idtab->lock); +	destid = find_first_zero_bit(idtab->table, idtab->max); + +	if (destid < idtab->max) { +		set_bit(destid, idtab->table); +		destid += idtab->start; +	} else +		destid = RIO_INVALID_DESTID; + +	spin_unlock(&idtab->lock); +	return (u16)destid; +} + +/** + * rio_destid_reserve - Reserve the specified destID + * @net: RIO network + * @destid: destID to reserve + * + * Tries to reserve the specified destID. + * Returns 0 if successful. + */ +static int rio_destid_reserve(struct rio_net *net, u16 destid) +{ +	int oldbit; +	struct rio_id_table *idtab = &net->destid_table; + +	destid -= idtab->start; +	spin_lock(&idtab->lock); +	oldbit = test_and_set_bit(destid, idtab->table); +	spin_unlock(&idtab->lock); +	return oldbit; +} + +/** + * rio_destid_free - free a previously allocated destID + * @net: RIO network + * @destid: destID to free + * + * Makes the specified destID available for use. 
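Together, rio_destid_alloc(), rio_destid_reserve() and rio_destid_free() form a small per-net ID allocator over a bitmap. Their intended calling order is visible in rio_enum_mport() later in this patch; condensed into one sketch:

/* Condensed from rio_enum_mport() below: the enumerating host reserves
 * its own destID before handing out any new ones, so the allocator can
 * never return the host's ID to a peer. */
static u16 destid_setup_sketch(struct rio_mport *mport, struct rio_net **pnet)
{
	struct rio_net *net = rio_alloc_net(mport, 1, 0);

	if (!net)
		return RIO_INVALID_DESTID;
	rio_destid_reserve(net, mport->host_deviceid);
	*pnet = net;
	return rio_destid_alloc(net);	/* first ID for a discovered peer */
}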
+ */ +static void rio_destid_free(struct rio_net *net, u16 destid) +{ +	struct rio_id_table *idtab = &net->destid_table; + +	destid -= idtab->start; +	spin_lock(&idtab->lock); +	clear_bit(destid, idtab->table); +	spin_unlock(&idtab->lock); +} + +/** + * rio_destid_first - return first destID in use + * @net: RIO network + */ +static u16 rio_destid_first(struct rio_net *net) +{ +	int destid; +	struct rio_id_table *idtab = &net->destid_table; + +	spin_lock(&idtab->lock); +	destid = find_first_bit(idtab->table, idtab->max); +	if (destid >= idtab->max) +		destid = RIO_INVALID_DESTID; +	else +		destid += idtab->start; +	spin_unlock(&idtab->lock); +	return (u16)destid; +} + +/** + * rio_destid_next - return next destID in use + * @net: RIO network + * @from: destination ID from which search shall continue + */ +static u16 rio_destid_next(struct rio_net *net, u16 from) +{ +	int destid; +	struct rio_id_table *idtab = &net->destid_table; + +	spin_lock(&idtab->lock); +	destid = find_next_bit(idtab->table, idtab->max, from); +	if (destid >= idtab->max) +		destid = RIO_INVALID_DESTID; +	else +		destid += idtab->start; +	spin_unlock(&idtab->lock); +	return (u16)destid; +} +  /**   * rio_get_device_id - Get the base/extended device id for a device   * @port: RIO master port @@ -109,14 +201,15 @@ static void rio_local_set_device_id(struct rio_mport *port, u16 did)  /**   * rio_clear_locks- Release all host locks and signal enumeration complete - * @port: Master port to issue transaction + * @net: RIO network to run on   *   * Marks the component tag CSR on each device with the enumeration   * complete flag. When complete, it then release the host locks on   * each device. Returns 0 on success or %-EINVAL on failure.   */ -static int rio_clear_locks(struct rio_mport *port) +static int rio_clear_locks(struct rio_net *net)  { +	struct rio_mport *port = net->hport;  	struct rio_dev *rdev;  	u32 result;  	int ret = 0; @@ -131,7 +224,7 @@ static int rio_clear_locks(struct rio_mport *port)  		       result);  		ret = -EINVAL;  	} -	list_for_each_entry(rdev, &rio_devices, global_list) { +	list_for_each_entry(rdev, &net->devices, net_list) {  		rio_write_config_32(rdev, RIO_HOST_DID_LOCK_CSR,  				    port->host_deviceid);  		rio_read_config_32(rdev, RIO_HOST_DID_LOCK_CSR, &result); @@ -177,10 +270,6 @@ static int rio_enum_host(struct rio_mport *port)  	/* Set master port destid and init destid ctr */  	rio_local_set_device_id(port, port->host_deviceid); - -	if (next_destid == port->host_deviceid) -		next_destid++; -  	return 0;  } @@ -234,127 +323,6 @@ static int rio_is_switch(struct rio_dev *rdev)  }  /** - * rio_switch_init - Sets switch operations for a particular vendor switch - * @rdev: RIO device - * @do_enum: Enumeration/Discovery mode flag - * - * Searches the RIO switch ops table for known switch types. If the vid - * and did match a switch table entry, then call switch initialization - * routine to setup switch-specific routines. 
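rio_destid_first() and rio_destid_next() let enumeration code walk the IDs currently in use without holding the table lock across a whole loop. The iteration idiom, shaped like the loop rio_enum_peer() uses below when seeding a new switch's routing table (a sketch only, with pr_debug() standing in for real work):

static void for_each_destid_sketch(struct rio_net *net, u16 host_destid)
{
	u16 destid = rio_destid_first(net);

	while (destid != RIO_INVALID_DESTID) {
		if (destid != host_destid)
			pr_debug("RIO: destid 0x%04x in use\n", destid);
		destid = rio_destid_next(net, destid + 1);
	}
}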
- */ -static void rio_switch_init(struct rio_dev *rdev, int do_enum) -{ -	struct rio_switch_ops *cur = __start_rio_switch_ops; -	struct rio_switch_ops *end = __end_rio_switch_ops; - -	while (cur < end) { -		if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) { -			pr_debug("RIO: calling init routine for %s\n", -				 rio_name(rdev)); -			cur->init_hook(rdev, do_enum); -			break; -		} -		cur++; -	} - -	if ((cur >= end) && (rdev->pef & RIO_PEF_STD_RT)) { -		pr_debug("RIO: adding STD routing ops for %s\n", -			rio_name(rdev)); -		rdev->rswitch->add_entry = rio_std_route_add_entry; -		rdev->rswitch->get_entry = rio_std_route_get_entry; -		rdev->rswitch->clr_table = rio_std_route_clr_table; -	} - -	if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry) -		printk(KERN_ERR "RIO: missing routing ops for %s\n", -		       rio_name(rdev)); -} - -/** - * rio_add_device- Adds a RIO device to the device model - * @rdev: RIO device - * - * Adds the RIO device to the global device list and adds the RIO - * device to the RIO device list.  Creates the generic sysfs nodes - * for an RIO device. - */ -static int __devinit rio_add_device(struct rio_dev *rdev) -{ -	int err; - -	err = device_add(&rdev->dev); -	if (err) -		return err; - -	spin_lock(&rio_global_list_lock); -	list_add_tail(&rdev->global_list, &rio_devices); -	spin_unlock(&rio_global_list_lock); - -	rio_create_sysfs_dev_files(rdev); - -	return 0; -} - -/** - * rio_enable_rx_tx_port - enable input reciever and output transmitter of - * given port - * @port: Master port associated with the RIO network - * @local: local=1 select local port otherwise a far device is reached - * @destid: Destination ID of the device to check host bit - * @hopcount: Number of hops to reach the target - * @port_num: Port (-number on switch) to enable on a far end device - * - * Returns 0 or 1 from on General Control Command and Status Register - * (EXT_PTR+0x3C) - */ -inline int rio_enable_rx_tx_port(struct rio_mport *port, -				 int local, u16 destid, -				 u8 hopcount, u8 port_num) { -#ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS -	u32 regval; -	u32 ext_ftr_ptr; - -	/* -	* enable rx input tx output port -	*/ -	pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = " -		 "%d, port_num = %d)\n", local, destid, hopcount, port_num); - -	ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount); - -	if (local) { -		rio_local_read_config_32(port, ext_ftr_ptr + -				RIO_PORT_N_CTL_CSR(0), -				®val); -	} else { -		if (rio_mport_read_config_32(port, destid, hopcount, -		ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), ®val) < 0) -			return -EIO; -	} - -	if (regval & RIO_PORT_N_CTL_P_TYP_SER) { -		/* serial */ -		regval = regval | RIO_PORT_N_CTL_EN_RX_SER -				| RIO_PORT_N_CTL_EN_TX_SER; -	} else { -		/* parallel */ -		regval = regval | RIO_PORT_N_CTL_EN_RX_PAR -				| RIO_PORT_N_CTL_EN_TX_PAR; -	} - -	if (local) { -		rio_local_write_config_32(port, ext_ftr_ptr + -					  RIO_PORT_N_CTL_CSR(0), regval); -	} else { -		if (rio_mport_write_config_32(port, destid, hopcount, -		    ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0) -			return -EIO; -	} -#endif -	return 0; -} - -/**   * rio_setup_device- Allocates and sets up a RIO device   * @net: RIO network   * @port: Master port to send transactions @@ -370,7 +338,7 @@ inline int rio_enable_rx_tx_port(struct rio_mport *port,   * to a RIO device on success or NULL on failure.   
*   */ -static struct rio_dev __devinit *rio_setup_device(struct rio_net *net, +static struct rio_dev *rio_setup_device(struct rio_net *net,  					struct rio_mport *port, u16 destid,  					u8 hopcount, int do_enum)  { @@ -378,12 +346,30 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,  	struct rio_dev *rdev;  	struct rio_switch *rswitch = NULL;  	int result, rdid; +	size_t size; +	u32 swpinfo = 0; + +	size = sizeof(struct rio_dev); +	if (rio_mport_read_config_32(port, destid, hopcount, +				     RIO_PEF_CAR, &result)) +		return NULL; -	rdev = kzalloc(sizeof(struct rio_dev), GFP_KERNEL); +	if (result & (RIO_PEF_SWITCH | RIO_PEF_MULTIPORT)) { +		rio_mport_read_config_32(port, destid, hopcount, +					 RIO_SWP_INFO_CAR, &swpinfo); +		if (result & RIO_PEF_SWITCH) { +			size += (RIO_GET_TOTAL_PORTS(swpinfo) * +				sizeof(rswitch->nextdev[0])) + sizeof(*rswitch); +		} +	} + +	rdev = kzalloc(size, GFP_KERNEL);  	if (!rdev)  		return NULL;  	rdev->net = net; +	rdev->pef = result; +	rdev->swpinfo = swpinfo;  	rio_mport_read_config_32(port, destid, hopcount, RIO_DEV_ID_CAR,  				 &result);  	rdev->did = result >> 16; @@ -397,8 +383,6 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,  	rio_mport_read_config_32(port, destid, hopcount, RIO_ASM_INFO_CAR,  				 &result);  	rdev->asm_rev = result >> 16; -	rio_mport_read_config_32(port, destid, hopcount, RIO_PEF_CAR, -				 &rdev->pef);  	if (rdev->pef & RIO_PEF_EXT_FEATURES) {  		rdev->efptr = result & 0xffff;  		rdev->phys_efptr = rio_mport_get_physefb(port, 0, destid, @@ -408,11 +392,6 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,  						hopcount, RIO_EFB_ERR_MGMNT);  	} -	if (rdev->pef & (RIO_PEF_SWITCH | RIO_PEF_MULTIPORT)) { -		rio_mport_read_config_32(port, destid, hopcount, -					 RIO_SWP_INFO_CAR, &rdev->swpinfo); -	} -  	rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR,  				 &rdev->src_ops);  	rio_mport_read_config_32(port, destid, hopcount, RIO_DST_OPS_CAR, @@ -427,32 +406,35 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,  		rio_mport_write_config_32(port, destid, hopcount,  					  RIO_COMPONENT_TAG_CSR, next_comptag);  		rdev->comp_tag = next_comptag++; +		rdev->do_enum = true; +	}  else { +		rio_mport_read_config_32(port, destid, hopcount, +					 RIO_COMPONENT_TAG_CSR, +					 &rdev->comp_tag);  	}  	if (rio_device_has_destid(port, rdev->src_ops, rdev->dst_ops)) {  		if (do_enum) {  			rio_set_device_id(port, destid, hopcount, next_destid); -			rdev->destid = next_destid++; -			if (next_destid == port->host_deviceid) -				next_destid++; +			rdev->destid = next_destid; +			next_destid = rio_destid_alloc(net);  		} else  			rdev->destid = rio_get_device_id(port, destid, hopcount); -	} else -		/* Switch device has an associated destID */ -		rdev->destid = RIO_INVALID_DESTID; + +		rdev->hopcount = 0xff; +	} else { +		/* Switch device has an associated destID which +		 * will be adjusted later +		 */ +		rdev->destid = destid; +		rdev->hopcount = hopcount; +	}  	/* If a PE has both switch and other functions, show it as a switch */  	if (rio_is_switch(rdev)) { -		rswitch = kzalloc(sizeof(*rswitch) + -				  RIO_GET_TOTAL_PORTS(rdev->swpinfo) * -				  sizeof(rswitch->nextdev[0]), -				  GFP_KERNEL); -		if (!rswitch) -			goto cleanup; -		rswitch->switchid = next_switchid; -		rswitch->hopcount = hopcount; -		rswitch->destid = destid; +		rswitch = rdev->rswitch;  		rswitch->port_ok = 0; +		spin_lock_init(&rswitch->lock);  		
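The sizing logic at the top of rio_setup_device() now reserves one allocation for the device, its switch structure and the per-port nextdev[] pointer array, which is why the old separate kzalloc() of rswitch disappears below. The arithmetic is equivalent to this sketch:

/*
 *  +----------------+-------------------+---------------------------+
 *  | struct rio_dev | struct rio_switch | nextdev[total ports] ...  |
 *  +----------------+-------------------+---------------------------+
 */
size_t size = sizeof(struct rio_dev);

if (result & RIO_PEF_SWITCH)
	size += sizeof(struct rio_switch) +
		RIO_GET_TOTAL_PORTS(swpinfo) * sizeof(struct rio_dev *);
/* a single kzalloc(size, GFP_KERNEL) then backs rdev and rdev->rswitch */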
rswitch->route_table = kzalloc(sizeof(u8)*  					RIO_MAX_ROUTE_ENTRIES(port->sys_size),  					GFP_KERNEL); @@ -462,17 +444,13 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,  		for (rdid = 0; rdid < RIO_MAX_ROUTE_ENTRIES(port->sys_size);  				rdid++)  			rswitch->route_table[rdid] = RIO_INVALID_ROUTE; -		rdev->rswitch = rswitch; -		rswitch->rdev = rdev;  		dev_set_name(&rdev->dev, "%02x:s:%04x", rdev->net->id, -			     rdev->rswitch->switchid); -		rio_switch_init(rdev, do_enum); +			     rdev->comp_tag & RIO_CTAG_UDEVID); -		if (do_enum && rdev->rswitch->clr_table) -			rdev->rswitch->clr_table(port, destid, hopcount, -						 RIO_GLOBAL_TABLE); +		if (do_enum) +			rio_route_clr_table(rdev, RIO_GLOBAL_TABLE, 0); -		list_add_tail(&rswitch->node, &rio_switches); +		list_add_tail(&rswitch->node, &net->switches);  	} else {  		if (do_enum) @@ -480,11 +458,11 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,  			rio_enable_rx_tx_port(port, 0, destid, hopcount, 0);  		dev_set_name(&rdev->dev, "%02x:e:%04x", rdev->net->id, -			     rdev->destid); +			     rdev->comp_tag & RIO_CTAG_UDEVID);  	} -	rdev->dev.bus = &rio_bus_type; -	rdev->dev.parent = &rio_bus; +	rdev->dev.parent = &port->dev; +	rio_attach_device(rdev);  	device_initialize(&rdev->dev);  	rdev->dev.release = rio_release_dev; @@ -494,8 +472,7 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,  	rdev->dev.dma_mask = &rdev->dma_mask;  	rdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); -	if ((rdev->pef & RIO_PEF_INB_DOORBELL) && -	    (rdev->dst_ops & RIO_DST_OPS_DOORBELL)) +	if (rdev->dst_ops & RIO_DST_OPS_DOORBELL)  		rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE],  				   0, 0xffff); @@ -506,10 +483,9 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,  	return rdev;  cleanup: -	if (rswitch) { +	if (rswitch)  		kfree(rswitch->route_table); -		kfree(rswitch); -	} +  	kfree(rdev);  	return NULL;  } @@ -557,156 +533,6 @@ rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport)  }  /** - * rio_lock_device - Acquires host device lock for specified device - * @port: Master port to send transaction - * @destid: Destination ID for device/switch - * @hopcount: Hopcount to reach switch - * @wait_ms: Max wait time in msec (0 = no timeout) - * - * Attepts to acquire host device lock for specified device - * Returns 0 if device lock acquired or EINVAL if timeout expires. 
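The helpers being removed here (the calls that remain below rely on shared copies kept with the core code) implement the standard RapidIO host-lock handshake: write your host ID to RIO_HOST_DID_LOCK_CSR, read it back, and you own the lock only if your own ID comes back; writing the owner's ID again releases the lock, after which the CSR reads 0xffff. A single acquire probe in isolation (a sketch; rio_lock_device() wraps this in a timed retry loop):

static bool rio_try_lock_sketch(struct rio_mport *port, u16 destid, u8 hopcount)
{
	u32 result;

	rio_mport_write_config_32(port, destid, hopcount,
				  RIO_HOST_DID_LOCK_CSR, port->host_deviceid);
	rio_mport_read_config_32(port, destid, hopcount,
				 RIO_HOST_DID_LOCK_CSR, &result);
	return result == port->host_deviceid;	/* true: this host owns it */
}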
- */ -static int -rio_lock_device(struct rio_mport *port, u16 destid, u8 hopcount, int wait_ms) -{ -	u32 result; -	int tcnt = 0; - -	/* Attempt to acquire device lock */ -	rio_mport_write_config_32(port, destid, hopcount, -				  RIO_HOST_DID_LOCK_CSR, port->host_deviceid); -	rio_mport_read_config_32(port, destid, hopcount, -				 RIO_HOST_DID_LOCK_CSR, &result); - -	while (result != port->host_deviceid) { -		if (wait_ms != 0 && tcnt == wait_ms) { -			pr_debug("RIO: timeout when locking device %x:%x\n", -				destid, hopcount); -			return -EINVAL; -		} - -		/* Delay a bit */ -		mdelay(1); -		tcnt++; -		/* Try to acquire device lock again */ -		rio_mport_write_config_32(port, destid, -			hopcount, -			RIO_HOST_DID_LOCK_CSR, -			port->host_deviceid); -		rio_mport_read_config_32(port, destid, -			hopcount, -			RIO_HOST_DID_LOCK_CSR, &result); -	} - -	return 0; -} - -/** - * rio_unlock_device - Releases host device lock for specified device - * @port: Master port to send transaction - * @destid: Destination ID for device/switch - * @hopcount: Hopcount to reach switch - * - * Returns 0 if device lock released or EINVAL if fails. - */ -static int -rio_unlock_device(struct rio_mport *port, u16 destid, u8 hopcount) -{ -	u32 result; - -	/* Release device lock */ -	rio_mport_write_config_32(port, destid, -				  hopcount, -				  RIO_HOST_DID_LOCK_CSR, -				  port->host_deviceid); -	rio_mport_read_config_32(port, destid, hopcount, -		RIO_HOST_DID_LOCK_CSR, &result); -	if ((result & 0xffff) != 0xffff) { -		pr_debug("RIO: badness when releasing device lock %x:%x\n", -			 destid, hopcount); -		return -EINVAL; -	} - -	return 0; -} - -/** - * rio_route_add_entry- Add a route entry to a switch routing table - * @mport: Master port to send transaction - * @rswitch: Switch device - * @table: Routing table ID - * @route_destid: Destination ID to be routed - * @route_port: Port number to be routed - * @lock: lock switch device flag - * - * Calls the switch specific add_entry() method to add a route entry - * on a switch. The route table can be specified using the @table - * argument if a switch has per port routing tables or the normal - * use is to specific all tables (or the global table) by passing - * %RIO_GLOBAL_TABLE in @table. Returns %0 on success or %-EINVAL - * on failure. - */ -static int -rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswitch, -		    u16 table, u16 route_destid, u8 route_port, int lock) -{ -	int rc; - -	if (lock) { -		rc = rio_lock_device(mport, rswitch->destid, -				     rswitch->hopcount, 1000); -		if (rc) -			return rc; -	} - -	rc = rswitch->add_entry(mport, rswitch->destid, -					rswitch->hopcount, table, -					route_destid, route_port); -	if (lock) -		rio_unlock_device(mport, rswitch->destid, rswitch->hopcount); - -	return rc; -} - -/** - * rio_route_get_entry- Read a route entry in a switch routing table - * @mport: Master port to send transaction - * @rswitch: Switch device - * @table: Routing table ID - * @route_destid: Destination ID to be routed - * @route_port: Pointer to read port number into - * @lock: lock switch device flag - * - * Calls the switch specific get_entry() method to read a route entry - * in a switch. The route table can be specified using the @table - * argument if a switch has per port routing tables or the normal - * use is to specific all tables (or the global table) by passing - * %RIO_GLOBAL_TABLE in @table. Returns %0 on success or %-EINVAL - * on failure. 
- */ -static int -rio_route_get_entry(struct rio_mport *mport, struct rio_switch *rswitch, u16 table, -		    u16 route_destid, u8 *route_port, int lock) -{ -	int rc; - -	if (lock) { -		rc = rio_lock_device(mport, rswitch->destid, -				     rswitch->hopcount, 1000); -		if (rc) -			return rc; -	} - -	rc = rswitch->get_entry(mport, rswitch->destid, -					rswitch->hopcount, table, -					route_destid, route_port); -	if (lock) -		rio_unlock_device(mport, rswitch->destid, rswitch->hopcount); - -	return rc; -} - -/**   * rio_get_host_deviceid_lock- Reads the Host Device ID Lock CSR on a device   * @port: Master port to send transaction   * @hopcount: Number of hops to the device @@ -735,15 +561,10 @@ static u16 rio_get_host_deviceid_lock(struct rio_mport *port, u8 hopcount)   * Recursively enumerates a RIO network.  Transactions are sent via the   * master port passed in @port.   */ -static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port, +static int rio_enum_peer(struct rio_net *net, struct rio_mport *port,  			 u8 hopcount, struct rio_dev *prev, int prev_port)  { -	int port_num; -	int cur_destid; -	int sw_destid; -	int sw_inport;  	struct rio_dev *rdev; -	u16 destid;  	u32 regval;  	int tmp; @@ -809,20 +630,26 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,  		return -1;  	if (rio_is_switch(rdev)) { -		next_switchid++; +		int sw_destid; +		int cur_destid; +		int sw_inport; +		u16 destid; +		int port_num; +  		sw_inport = RIO_GET_PORT_NUM(rdev->swpinfo); -		rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE, +		rio_route_add_entry(rdev, RIO_GLOBAL_TABLE,  				    port->host_deviceid, sw_inport, 0);  		rdev->rswitch->route_table[port->host_deviceid] = sw_inport; -		for (destid = 0; destid < next_destid; destid++) { -			if (destid == port->host_deviceid) -				continue; -			rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE, -					    destid, sw_inport, 0); -			rdev->rswitch->route_table[destid] = sw_inport; +		destid = rio_destid_first(net); +		while (destid != RIO_INVALID_DESTID && destid < next_destid) { +			if (destid != port->host_deviceid) { +				rio_route_add_entry(rdev, RIO_GLOBAL_TABLE, +						    destid, sw_inport, 0); +				rdev->rswitch->route_table[destid] = sw_inport; +			} +			destid = rio_destid_next(net, destid + 1);  		} -  		pr_debug(  		    "RIO: found %s (vid %4.4x did %4.4x) with %d ports\n",  		    rio_name(rdev), rdev->vid, rdev->did, @@ -831,12 +658,10 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,  		for (port_num = 0;  		     port_num < RIO_GET_TOTAL_PORTS(rdev->swpinfo);  		     port_num++) { -			/*Enable Input Output Port (transmitter reviever)*/ -			rio_enable_rx_tx_port(port, 0, +			if (sw_inport == port_num) { +				rio_enable_rx_tx_port(port, 0,  					      RIO_ANY_DESTID(port->sys_size),  					      hopcount, port_num); - -			if (sw_inport == port_num) {  				rdev->rswitch->port_ok |= (1 << port_num);  				continue;  			} @@ -849,9 +674,11 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,  				pr_debug(  				    "RIO: scanning device on port %d\n",  				    port_num); +				rio_enable_rx_tx_port(port, 0, +					      RIO_ANY_DESTID(port->sys_size), +					      hopcount, port_num);  				rdev->rswitch->port_ok |= (1 << port_num); -				rio_route_add_entry(port, rdev->rswitch, -						RIO_GLOBAL_TABLE, +				rio_route_add_entry(rdev, RIO_GLOBAL_TABLE,  						RIO_ANY_DESTID(port->sys_size),  						port_num, 0); @@ -860,19 +687,22 @@ 
static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,  					return -1;  				/* Update routing tables */ -				if (next_destid > cur_destid) { +				destid = rio_destid_next(net, cur_destid + 1); +				if (destid != RIO_INVALID_DESTID) {  					for (destid = cur_destid; -					     destid < next_destid; destid++) { -						if (destid == port->host_deviceid) -							continue; -						rio_route_add_entry(port, rdev->rswitch, +					     destid < next_destid;) { +						if (destid != port->host_deviceid) { +							rio_route_add_entry(rdev,  								    RIO_GLOBAL_TABLE,  								    destid,  								    port_num,  								    0); -						rdev->rswitch-> -						    route_table[destid] = -						    port_num; +							rdev->rswitch-> +								route_table[destid] = +								port_num; +						} +						destid = rio_destid_next(net, +								destid + 1);  					}  				}  			} else { @@ -898,13 +728,10 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,  		rio_init_em(rdev);  		/* Check for empty switch */ -		if (next_destid == sw_destid) { -			next_destid++; -			if (next_destid == port->host_deviceid) -				next_destid++; -		} +		if (next_destid == sw_destid) +			next_destid = rio_destid_alloc(net); -		rdev->rswitch->destid = sw_destid; +		rdev->destid = sw_destid;  	} else  		pr_debug("RIO: found %s (vid %4.4x did %4.4x)\n",  		    rio_name(rdev), rdev->vid, rdev->did); @@ -916,7 +743,7 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,   * rio_enum_complete- Tests if enumeration of a network is complete   * @port: Master port to send transaction   * - * Tests the Component Tag CSR for non-zero value (enumeration + * Tests the PGCCSR discovered bit for non-zero value (enumeration   * complete flag). Return %1 if enumeration is complete or %0 if   * enumeration is incomplete.   */ @@ -926,7 +753,7 @@ static int rio_enum_complete(struct rio_mport *port)  	rio_local_read_config_32(port, port->phys_efptr + RIO_PORT_GEN_CTL_CSR,  				 ®val); -	return (regval & RIO_PORT_GEN_MASTER) ? 1 : 0; +	return (regval & RIO_PORT_GEN_DISCOVERED) ? 1 : 0;  }  /** @@ -935,13 +762,15 @@ static int rio_enum_complete(struct rio_mport *port)   * @port: Master port to send transactions   * @destid: Current destination ID in network   * @hopcount: Number of hops into the network + * @prev: previous rio_dev + * @prev_port: previous port number   *   * Recursively discovers a RIO network.  Transactions are sent via the   * master port passed in @port.   */ -static int __devinit +static int  rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid, -	      u8 hopcount) +	      u8 hopcount, struct rio_dev *prev, int prev_port)  {  	u8 port_num, route_port;  	struct rio_dev *rdev; @@ -951,14 +780,15 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,  	if ((rdev = rio_setup_device(net, port, destid, hopcount, 0))) {  		/* Add device to the global and bus/net specific list. 
*/  		list_add_tail(&rdev->net_list, &net->devices); +		rdev->prev = prev; +		if (prev && rio_is_switch(prev)) +			prev->rswitch->nextdev[prev_port] = rdev;  	} else  		return -1;  	if (rio_is_switch(rdev)) { -		next_switchid++; -  		/* Associated destid is how we accessed this switch */ -		rdev->rswitch->destid = destid; +		rdev->destid = destid;  		pr_debug(  		    "RIO: found %s (vid %4.4x did %4.4x) with %d ports\n", @@ -981,7 +811,7 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,  				for (ndestid = 0;  				     ndestid < RIO_ANY_DESTID(port->sys_size);  				     ndestid++) { -					rio_route_get_entry(port, rdev->rswitch, +					rio_route_get_entry(rdev,  							    RIO_GLOBAL_TABLE,  							    ndestid,  							    &route_port, 0); @@ -992,8 +822,8 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,  				if (ndestid == RIO_ANY_DESTID(port->sys_size))  					continue;  				rio_unlock_device(port, destid, hopcount); -				if (rio_disc_peer -				    (net, port, ndestid, hopcount + 1) < 0) +				if (rio_disc_peer(net, port, ndestid, +					hopcount + 1, rdev, port_num) < 0)  					return -1;  			}  		} @@ -1037,65 +867,86 @@ static int rio_mport_is_active(struct rio_mport *port)  /**   * rio_alloc_net- Allocate and configure a new RIO network   * @port: Master port associated with the RIO network + * @do_enum: Enumeration/Discovery mode flag + * @start: logical minimal start id for new net   *   * Allocates a RIO network structure, initializes per-network   * list heads, and adds the associated master port to the   * network list of associated master ports. Returns a   * RIO network pointer on success or %NULL on failure.   */ -static struct rio_net __devinit *rio_alloc_net(struct rio_mport *port) +static struct rio_net *rio_alloc_net(struct rio_mport *port, +					       int do_enum, u16 start)  {  	struct rio_net *net;  	net = kzalloc(sizeof(struct rio_net), GFP_KERNEL); +	if (net && do_enum) { +		net->destid_table.table = kcalloc( +			BITS_TO_LONGS(RIO_MAX_ROUTE_ENTRIES(port->sys_size)), +			sizeof(long), +			GFP_KERNEL); + +		if (net->destid_table.table == NULL) { +			pr_err("RIO: failed to allocate destID table\n"); +			kfree(net); +			net = NULL; +		} else { +			net->destid_table.start = start; +			net->destid_table.max = +					RIO_MAX_ROUTE_ENTRIES(port->sys_size); +			spin_lock_init(&net->destid_table.lock); +		} +	} +  	if (net) {  		INIT_LIST_HEAD(&net->node);  		INIT_LIST_HEAD(&net->devices); +		INIT_LIST_HEAD(&net->switches);  		INIT_LIST_HEAD(&net->mports);  		list_add_tail(&port->nnode, &net->mports);  		net->hport = port; -		net->id = next_net++; +		net->id = port->id;  	}  	return net;  }  /**   * rio_update_route_tables- Updates route tables in switches - * @port: Master port associated with the RIO network + * @net: RIO network to run update on   *   * For each enumerated device, ensure that each switch in a system   * has correct routing entries. Add routes for devices that where   * unknown dirung the first enumeration pass through the switch.   
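rio_alloc_net() sizes the new destID table at one bit per possible destination ID for the system size, rounding up to whole longs with BITS_TO_LONGS(). The sizing in isolation:

#include <linux/bitops.h>
#include <linux/slab.h>

/* One bit per destID; BITS_TO_LONGS() converts a bit count into the
 * number of longs the bitmap occupies. */
static unsigned long *alloc_destid_bitmap(unsigned int max_destids)
{
	return kcalloc(BITS_TO_LONGS(max_destids), sizeof(long), GFP_KERNEL);
}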
*/ -static void rio_update_route_tables(struct rio_mport *port) +static void rio_update_route_tables(struct rio_net *net)  { -	struct rio_dev *rdev; +	struct rio_dev *rdev, *swrdev;  	struct rio_switch *rswitch;  	u8 sport;  	u16 destid; -	list_for_each_entry(rdev, &rio_devices, global_list) { +	list_for_each_entry(rdev, &net->devices, net_list) { -		destid = (rio_is_switch(rdev))?rdev->rswitch->destid:rdev->destid; +		destid = rdev->destid; -		list_for_each_entry(rswitch, &rio_switches, node) { +		list_for_each_entry(rswitch, &net->switches, node) {  			if (rio_is_switch(rdev)	&& (rdev->rswitch == rswitch))  				continue;  			if (RIO_INVALID_ROUTE == rswitch->route_table[destid]) { +				swrdev = sw_to_rio_dev(rswitch); +  				/* Skip if destid ends in empty switch*/ -				if (rswitch->destid == destid) +				if (swrdev->destid == destid)  					continue; -				sport = RIO_GET_PORT_NUM(rswitch->rdev->swpinfo); +				sport = RIO_GET_PORT_NUM(swrdev->swpinfo); -				if (rswitch->add_entry)	{ -					rio_route_add_entry(port, rswitch, -						RIO_GLOBAL_TABLE, destid, -						sport, 0); -					rswitch->route_table[destid] = sport; -				} +				rio_route_add_entry(swrdev, RIO_GLOBAL_TABLE, +						    destid, sport, 0); +				rswitch->route_table[destid] = sport;  			}  		}  	} @@ -1111,8 +962,8 @@ static void rio_update_route_tables(struct rio_mport *port)  static void rio_init_em(struct rio_dev *rdev)  {  	if (rio_is_switch(rdev) && (rdev->em_efptr) && -	    (rdev->rswitch->em_init)) { -		rdev->rswitch->em_init(rdev); +	    rdev->rswitch->ops && rdev->rswitch->ops->em_init) { +		rdev->rswitch->ops->em_init(rdev);  	}  } @@ -1130,19 +981,30 @@ static void rio_pw_enable(struct rio_mport *port, int enable)  /**   * rio_enum_mport- Start enumeration through a master port   * @mport: Master port to send transactions + * @flags: Enumeration control flags   *   * Starts the enumeration process. If somebody has enumerated our   * master port device, then give up. If not and we have an active   * link, then start recursive peer enumeration. Returns %0 if   * enumeration succeeds or %-EBUSY if enumeration fails.   */ -int __devinit rio_enum_mport(struct rio_mport *mport) +static int rio_enum_mport(struct rio_mport *mport, u32 flags)  {  	struct rio_net *net = NULL;  	int rc = 0;  	printk(KERN_INFO "RIO: enumerate master port %d, %s\n", mport->id,  	       mport->name); + +	/* +	 * To avoid multiple start requests (repeat enumeration is not supported +	 * by this method), check if enumeration/discovery was already performed +	 * for this mport: if the mport was added to a net's list of mports, +	 * exit with an error. +	 */ +	if (mport->nnode.next || mport->nnode.prev) +		return -EBUSY; +  	/* If somebody else enumerated our master port device, bail. 
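The repeat-request guard added to rio_enum_mport() above relies on the mport structure being zero-initialized: a list node that was never linked into a net's mport list still has NULL next and prev pointers (list_empty() does not apply here because nnode is a member node, not a list head). Written as a helper, the test would read:

/* True once this mport has been linked into some net's mport list. */
static bool mport_already_scanned(struct rio_mport *mport)
{
	return mport->nnode.next || mport->nnode.prev;
}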
*/  	if (rio_enum_host(mport) < 0) {  		printk(KERN_INFO @@ -1154,12 +1016,16 @@ int __devinit rio_enum_mport(struct rio_mport *mport)  	/* If master port has an active link, allocate net and enum peers */  	if (rio_mport_is_active(mport)) { -		if (!(net = rio_alloc_net(mport))) { +		net = rio_alloc_net(mport, 1, 0); +		if (!net) {  			printk(KERN_ERR "RIO: failed to allocate new net\n");  			rc = -ENOMEM;  			goto out;  		} +		/* reserve mport destID in new net */ +		rio_destid_reserve(net, mport->host_deviceid); +  		/* Enable Input Output Port (transmitter reviever) */  		rio_enable_rx_tx_port(mport, 1, 0, 0, 0); @@ -1167,17 +1033,21 @@ int __devinit rio_enum_mport(struct rio_mport *mport)  		rio_local_write_config_32(mport, RIO_COMPONENT_TAG_CSR,  					  next_comptag++); +		next_destid = rio_destid_alloc(net); +  		if (rio_enum_peer(net, mport, 0, NULL, 0) < 0) {  			/* A higher priority host won enumeration, bail. */  			printk(KERN_INFO  			       "RIO: master port %d device has lost enumeration to a remote host\n",  			       mport->id); -			rio_clear_locks(mport); +			rio_clear_locks(net);  			rc = -EBUSY;  			goto out;  		} -		rio_update_route_tables(mport); -		rio_clear_locks(mport); +		/* free the last allocated destID (unused) */ +		rio_destid_free(net, next_destid); +		rio_update_route_tables(net); +		rio_clear_locks(net);  		rio_pw_enable(mport, 1);  	} else {  		printk(KERN_INFO "RIO: master port %d link inactive\n", @@ -1191,91 +1061,83 @@ int __devinit rio_enum_mport(struct rio_mport *mport)  /**   * rio_build_route_tables- Generate route tables from switch route entries + * @net: RIO network to run route tables scan on   *   * For each switch device, generate a route table by copying existing   * route entries from the switch.   */ -static void rio_build_route_tables(void) +static void rio_build_route_tables(struct rio_net *net)  { +	struct rio_switch *rswitch;  	struct rio_dev *rdev;  	int i;  	u8 sport; -	list_for_each_entry(rdev, &rio_devices, global_list) -		if (rio_is_switch(rdev)) { -			rio_lock_device(rdev->net->hport, rdev->rswitch->destid, -					rdev->rswitch->hopcount, 1000); -			for (i = 0; -			     i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size); -			     i++) { -				if (rio_route_get_entry -				    (rdev->net->hport, rdev->rswitch, -				     RIO_GLOBAL_TABLE, i, &sport, 0) < 0) -					continue; -				rdev->rswitch->route_table[i] = sport; -			} +	list_for_each_entry(rswitch, &net->switches, node) { +		rdev = sw_to_rio_dev(rswitch); -			rio_unlock_device(rdev->net->hport, -					  rdev->rswitch->destid, -					  rdev->rswitch->hopcount); +		rio_lock_device(net->hport, rdev->destid, +				rdev->hopcount, 1000); +		for (i = 0; +		     i < RIO_MAX_ROUTE_ENTRIES(net->hport->sys_size); +		     i++) { +			if (rio_route_get_entry(rdev, RIO_GLOBAL_TABLE, +						i, &sport, 0) < 0) +				continue; +			rswitch->route_table[i] = sport;  		} -} -/** - * rio_enum_timeout- Signal that enumeration timed out - * @data: Address of timeout flag. - * - * When the enumeration complete timer expires, set a flag that - * signals to the discovery process that enumeration did not - * complete in a sane amount of time. - */ -static void rio_enum_timeout(unsigned long data) -{ -	/* Enumeration timed out, set flag */ -	*(int *)data = 1; +		rio_unlock_device(net->hport, rdev->destid, rdev->hopcount); +	}  }  /**   * rio_disc_mport- Start discovery through a master port   * @mport: Master port to send transactions + * @flags: discovery control flags   *   * Starts the discovery process. 
If we have an active link, - then wait for the signal that enumeration is complete. + then wait for the signal that enumeration is complete (if wait + is allowed).   * When enumeration completion is signaled, start recursive   * peer discovery. Returns %0 if discovery succeeds or %-EBUSY   * on failure.   */ -int __devinit rio_disc_mport(struct rio_mport *mport) +static int rio_disc_mport(struct rio_mport *mport, u32 flags)  {  	struct rio_net *net = NULL; -	int enum_timeout_flag = 0; +	unsigned long to_end;  	printk(KERN_INFO "RIO: discover master port %d, %s\n", mport->id,  	       mport->name);  	/* If master port has an active link, allocate net and discover peers */  	if (rio_mport_is_active(mport)) { -		if (!(net = rio_alloc_net(mport))) { -			printk(KERN_ERR "RIO: Failed to allocate new net\n"); -			goto bail; +		if (rio_enum_complete(mport)) +			goto enum_done; +		else if (flags & RIO_SCAN_ENUM_NO_WAIT) +			return -EAGAIN; + +		pr_debug("RIO: wait for enumeration to complete...\n"); + +		to_end = jiffies + CONFIG_RAPIDIO_DISC_TIMEOUT * HZ; +		while (time_before(jiffies, to_end)) { +			if (rio_enum_complete(mport)) +				goto enum_done; +			msleep(10);  		} -		pr_debug("RIO: wait for enumeration complete..."); - -		rio_enum_timer.expires = -		    jiffies + CONFIG_RAPIDIO_DISC_TIMEOUT * HZ; -		rio_enum_timer.data = (unsigned long)&enum_timeout_flag; -		add_timer(&rio_enum_timer); -		while (!rio_enum_complete(mport)) { -			mdelay(1); -			if (enum_timeout_flag) { -				del_timer_sync(&rio_enum_timer); -				goto timeout; -			} -		} -		del_timer_sync(&rio_enum_timer); +		pr_debug("RIO: discovery timeout on mport %d %s\n", +			 mport->id, mport->name); +		goto bail; +enum_done: +		pr_debug("RIO: ... enumeration done\n"); -		pr_debug("done\n"); +		net = rio_alloc_net(mport, 0, 0); +		if (!net) { +			printk(KERN_ERR "RIO: Failed to allocate new net\n"); +			goto bail; +		}  		/* Read DestID assigned by enumerator */  		rio_local_read_config_32(mport, RIO_DID_CSR, @@ -1284,20 +1146,56 @@ int __devinit rio_disc_mport(struct rio_mport *mport)  						   mport->host_deviceid);  		if (rio_disc_peer(net, mport, RIO_ANY_DESTID(mport->sys_size), -					0) < 0) { +					0, NULL, 0) < 0) {  			printk(KERN_INFO  			       "RIO: master port %d device has failed discovery\n",  			       mport->id);  			goto bail;  		} -		rio_build_route_tables(); +		rio_build_route_tables(net);  	}  	return 0; - -      timeout: -	pr_debug("timeout\n"); -      bail: +bail:  	return -EBUSY;  } + +static struct rio_scan rio_scan_ops = { +	.owner = THIS_MODULE, +	.enumerate = rio_enum_mport, +	.discover = rio_disc_mport, +}; + +static bool scan; +module_param(scan, bool, 0); +MODULE_PARM_DESC(scan, "Start RapidIO network enumeration/discovery " +			"(default = 0)"); + +/** + * rio_basic_attach: + * + * When this enumeration/discovery method is loaded as a module, this function + * registers its specific enumeration and discovery routines for all available + * RapidIO mport devices. The "scan" command line parameter controls the ability + * of the module to start RapidIO enumeration/discovery automatically. + * + * Returns 0 for success or -EIO if unable to register itself. + * + * This enumeration/discovery method cannot be unloaded and therefore does not + * provide a matching cleanup_module routine. 
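rio_scan_ops above is the first user of the pluggable enumeration interface: any method registers a struct rio_scan the same way. A hypothetical second method would differ only in its callbacks (my_enum_mport() and my_disc_mport() are placeholders with the (struct rio_mport *, u32) signature used in this file):

static int my_enum_mport(struct rio_mport *mport, u32 flags);	/* placeholder */
static int my_disc_mport(struct rio_mport *mport, u32 flags);	/* placeholder */

static struct rio_scan my_scan_ops = {
	.owner	   = THIS_MODULE,
	.enumerate = my_enum_mport,
	.discover  = my_disc_mport,
};

static int __init my_method_init(void)
{
	/* RIO_MPORT_ANY attaches the method to every master port. */
	return rio_register_scan(RIO_MPORT_ANY, &my_scan_ops);
}

When rio-scan itself is built as a module, loading it with scan=1 (the module parameter declared above) additionally kicks off rio_init_mports() right after registration.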
+ */ + +static int __init rio_basic_attach(void) +{ +	if (rio_register_scan(RIO_MPORT_ANY, &rio_scan_ops)) +		return -EIO; +	if (scan) +		rio_init_mports(); +	return 0; +} + +late_initcall(rio_basic_attach); + +MODULE_DESCRIPTION("Basic RapidIO enumeration/discovery"); +MODULE_LICENSE("GPL"); diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c index 137ed93ee33..cdb005c0094 100644 --- a/drivers/rapidio/rio-sysfs.c +++ b/drivers/rapidio/rio-sysfs.c @@ -14,6 +14,7 @@  #include <linux/rio.h>  #include <linux/rio_drv.h>  #include <linux/stat.h> +#include <linux/capability.h>  #include "rio.h" @@ -26,6 +27,7 @@ field##_show(struct device *dev, struct device_attribute *attr, char *buf)			\  									\  	return sprintf(buf, format_string, rdev->field);		\  }									\ +static DEVICE_ATTR_RO(field);  rio_config_attr(did, "0x%04x\n");  rio_config_attr(vid, "0x%04x\n"); @@ -33,6 +35,8 @@ rio_config_attr(device_rev, "0x%08x\n");  rio_config_attr(asm_did, "0x%04x\n");  rio_config_attr(asm_vid, "0x%04x\n");  rio_config_attr(asm_rev, "0x%04x\n"); +rio_config_attr(destid, "0x%04x\n"); +rio_config_attr(hopcount, "0x%02x\n");  static ssize_t routes_show(struct device *dev, struct device_attribute *attr, char *buf)  { @@ -51,18 +55,70 @@ static ssize_t routes_show(struct device *dev, struct device_attribute *attr, ch  	return (str - buf);  } +static DEVICE_ATTR_RO(routes); -struct device_attribute rio_dev_attrs[] = { -	__ATTR_RO(did), -	__ATTR_RO(vid), -	__ATTR_RO(device_rev), -	__ATTR_RO(asm_did), -	__ATTR_RO(asm_vid), -	__ATTR_RO(asm_rev), -	__ATTR_NULL, +static ssize_t lprev_show(struct device *dev, +			  struct device_attribute *attr, char *buf) +{ +	struct rio_dev *rdev = to_rio_dev(dev); + +	return sprintf(buf, "%s\n", +			(rdev->prev) ? rio_name(rdev->prev) : "root"); +} +static DEVICE_ATTR_RO(lprev); + +static ssize_t lnext_show(struct device *dev, +			  struct device_attribute *attr, char *buf) +{ +	struct rio_dev *rdev = to_rio_dev(dev); +	char *str = buf; +	int i; + +	if (rdev->pef & RIO_PEF_SWITCH) { +		for (i = 0; i < RIO_GET_TOTAL_PORTS(rdev->swpinfo); i++) { +			if (rdev->rswitch->nextdev[i]) +				str += sprintf(str, "%s\n", +					rio_name(rdev->rswitch->nextdev[i])); +			else +				str += sprintf(str, "null\n"); +		} +	} + +	return str - buf; +} +static DEVICE_ATTR_RO(lnext); + +static ssize_t modalias_show(struct device *dev, +			     struct device_attribute *attr, char *buf) +{ +	struct rio_dev *rdev = to_rio_dev(dev); + +	return sprintf(buf, "rapidio:v%04Xd%04Xav%04Xad%04X\n", +		       rdev->vid, rdev->did, rdev->asm_vid, rdev->asm_did); +} +static DEVICE_ATTR_RO(modalias); + +static struct attribute *rio_dev_attrs[] = { +	&dev_attr_did.attr, +	&dev_attr_vid.attr, +	&dev_attr_device_rev.attr, +	&dev_attr_asm_did.attr, +	&dev_attr_asm_vid.attr, +	&dev_attr_asm_rev.attr, +	&dev_attr_lprev.attr, +	&dev_attr_destid.attr, +	&dev_attr_modalias.attr, +	NULL,  }; -static DEVICE_ATTR(routes, S_IRUGO, routes_show, NULL); +static const struct attribute_group rio_dev_group = { +	.attrs = rio_dev_attrs, +}; + +const struct attribute_group *rio_dev_groups[] = { +	&rio_dev_group, +	NULL, +};  static ssize_t  rio_read_config(struct file *filp, struct kobject *kobj, @@ -77,9 +133,9 @@ rio_read_config(struct file *filp, struct kobject *kobj,  	/* Several chips lock up trying to read undefined config space */  	if (capable(CAP_SYS_ADMIN)) -		size = 0x200000; +		size = RIO_MAINT_SPACE_SZ; -	if (off > size) +	if (off >= size)  		return 0;  	if (off + count > size) {  		size -= off; @@ 
-147,10 +203,10 @@ rio_write_config(struct file *filp, struct kobject *kobj,  	loff_t init_off = off;  	u8 *data = (u8 *) buf; -	if (off > 0x200000) +	if (off >= RIO_MAINT_SPACE_SZ)  		return 0; -	if (off + count > 0x200000) { -		size = 0x200000 - off; +	if (off + count > RIO_MAINT_SPACE_SZ) { +		size = RIO_MAINT_SPACE_SZ - off;  		count = size;  	} @@ -200,7 +256,7 @@ static struct bin_attribute rio_config_attr = {  		 .name = "config",  		 .mode = S_IRUGO | S_IWUSR,  		 }, -	.size = 0x200000, +	.size = RIO_MAINT_SPACE_SZ,  	.read = rio_read_config,  	.write = rio_write_config,  }; @@ -217,10 +273,10 @@ int rio_create_sysfs_dev_files(struct rio_dev *rdev)  	err = device_create_bin_file(&rdev->dev, &rio_config_attr); -	if (!err && rdev->rswitch) { -		err = device_create_file(&rdev->dev, &dev_attr_routes); -		if (!err && rdev->rswitch->sw_sysfs) -			err = rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_CREATE); +	if (!err && (rdev->pef & RIO_PEF_SWITCH)) { +		err |= device_create_file(&rdev->dev, &dev_attr_routes); +		err |= device_create_file(&rdev->dev, &dev_attr_lnext); +		err |= device_create_file(&rdev->dev, &dev_attr_hopcount);  	}  	if (err) @@ -239,9 +295,89 @@ int rio_create_sysfs_dev_files(struct rio_dev *rdev)  void rio_remove_sysfs_dev_files(struct rio_dev *rdev)  {  	device_remove_bin_file(&rdev->dev, &rio_config_attr); -	if (rdev->rswitch) { +	if (rdev->pef & RIO_PEF_SWITCH) {  		device_remove_file(&rdev->dev, &dev_attr_routes); -		if (rdev->rswitch->sw_sysfs) -			rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_REMOVE); +		device_remove_file(&rdev->dev, &dev_attr_lnext); +		device_remove_file(&rdev->dev, &dev_attr_hopcount);  	}  } + +static ssize_t bus_scan_store(struct bus_type *bus, const char *buf, +				size_t count) +{ +	long val; +	int rc; + +	if (kstrtol(buf, 0, &val) < 0) +		return -EINVAL; + +	if (val == RIO_MPORT_ANY) { +		rc = rio_init_mports(); +		goto exit; +	} + +	if (val < 0 || val >= RIO_MAX_MPORTS) +		return -EINVAL; + +	rc = rio_mport_scan((int)val); +exit: +	if (!rc) +		rc = count; + +	return rc; +} +static BUS_ATTR(scan, (S_IWUSR|S_IWGRP), NULL, bus_scan_store); + +static struct attribute *rio_bus_attrs[] = { +	&bus_attr_scan.attr, +	NULL, +}; + +static const struct attribute_group rio_bus_group = { +	.attrs = rio_bus_attrs, +}; + +const struct attribute_group *rio_bus_groups[] = { +	&rio_bus_group, +	NULL, +}; + +static ssize_t +port_destid_show(struct device *dev, struct device_attribute *attr, +		 char *buf) +{ +	struct rio_mport *mport = to_rio_mport(dev); + +	if (mport) +		return sprintf(buf, "0x%04x\n", mport->host_deviceid); +	else +		return -ENODEV; +} +static DEVICE_ATTR_RO(port_destid); + +static ssize_t sys_size_show(struct device *dev, struct device_attribute *attr, +			   char *buf) +{ +	struct rio_mport *mport = to_rio_mport(dev); + +	if (mport) +		return sprintf(buf, "%u\n", mport->sys_size); +	else +		return -ENODEV; +} +static DEVICE_ATTR_RO(sys_size); + +static struct attribute *rio_mport_attrs[] = { +	&dev_attr_port_destid.attr, +	&dev_attr_sys_size.attr, +	NULL, +}; + +static const struct attribute_group rio_mport_group = { +	.attrs = rio_mport_attrs, +}; + +const struct attribute_group *rio_mport_groups[] = { +	&rio_mport_group, +	NULL, +}; diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c index 7b5080c4556..a54ba0494dd 100644 --- a/drivers/rapidio/rio.c +++ b/drivers/rapidio/rio.c @@ -5,9 +5,8 @@   * Copyright 2005 MontaVista Software, Inc.   
* Matt Porter <mporter@kernel.crashing.org>
 *
- * Copyright 2009 Integrated Device Technology, Inc.
+ * Copyright 2009 - 2013 Integrated Device Technology, Inc.
 * Alex Bounine <alexandre.bounine@idt.com>
- * - Added Port-Write/Error Management initialization and handling
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
@@ -31,7 +30,25 @@
 
 #include "rio.h"
 
+MODULE_DESCRIPTION("RapidIO Subsystem Core");
+MODULE_AUTHOR("Matt Porter <mporter@kernel.crashing.org>");
+MODULE_AUTHOR("Alexandre Bounine <alexandre.bounine@idt.com>");
+MODULE_LICENSE("GPL");
+
+static int hdid[RIO_MAX_MPORTS];
+static int ids_num;
+module_param_array(hdid, int, &ids_num, 0);
+MODULE_PARM_DESC(hdid,
+	"Destination ID assignment to local RapidIO controllers");
+
+static LIST_HEAD(rio_devices);
+static DEFINE_SPINLOCK(rio_global_list_lock);
+
 static LIST_HEAD(rio_mports);
+static LIST_HEAD(rio_scans);
+static DEFINE_MUTEX(rio_mport_list_lock);
+static unsigned char next_portid;
+static DEFINE_SPINLOCK(rio_mmap_lock);
 
 /**
  * rio_local_get_device_id - Get the base/extended device id for a port
@@ -51,6 +68,32 @@
 }
 
 /**
+ * rio_add_device - Adds a RIO device to the device model
+ * @rdev: RIO device
+ *
+ * Registers the RIO device with the device model, adds it to the
+ * global list of RIO devices and creates the generic sysfs nodes
+ * for a RIO device.
+ */
+int rio_add_device(struct rio_dev *rdev)
+{
+	int err;
+
+	err = device_add(&rdev->dev);
+	if (err)
+		return err;
+
+	spin_lock(&rio_global_list_lock);
+	list_add_tail(&rdev->global_list, &rio_devices);
+	spin_unlock(&rio_global_list_lock);
+
+	rio_create_sysfs_dev_files(rdev);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rio_add_device);
+
+/**
 * rio_request_inb_mbox - request inbound mailbox service
 * @mport: RIO master port from which to allocate the mailbox resource
 * @dev_id: Device specific pointer to pass on event
@@ -68,9 +111,13 @@ int rio_request_inb_mbox(struct rio_mport *mport,
 			 void (*minb) (struct rio_mport * mport, void *dev_id, int mbox,
 				       int slot))
 {
-	int rc = 0;
+	int rc = -ENOSYS;
+	struct resource *res;
-	struct resource *res = 
kmalloc(sizeof(struct resource), GFP_KERNEL); +	if (mport->ops->open_outb_mbox == NULL) +		goto out; + +	res = kmalloc(sizeof(struct resource), GFP_KERNEL);  	if (res) {  		rio_init_mbox_res(res, mbox, mbox); @@ -149,7 +203,7 @@ int rio_request_outb_mbox(struct rio_mport *mport,  		/* Hook the inbound message callback */  		mport->outb_msg[mbox].mcback = moutb; -		rc = rio_open_outb_mbox(mport, dev_id, mbox, entries); +		rc = mport->ops->open_outb_mbox(mport, dev_id, mbox, entries);  	} else  		rc = -ENOMEM; @@ -167,10 +221,13 @@ int rio_request_outb_mbox(struct rio_mport *mport,   */  int rio_release_outb_mbox(struct rio_mport *mport, int mbox)  { -	rio_close_outb_mbox(mport, mbox); +	if (mport->ops->close_outb_mbox) { +		mport->ops->close_outb_mbox(mport, mbox); -	/* Release the mailbox resource */ -	return release_resource(mport->outb_msg[mbox].res); +		/* Release the mailbox resource */ +		return release_resource(mport->outb_msg[mbox].res); +	} else +		return -ENOSYS;  }  /** @@ -383,6 +440,49 @@ int rio_release_inb_pwrite(struct rio_dev *rdev)  EXPORT_SYMBOL_GPL(rio_release_inb_pwrite);  /** + * rio_map_inb_region -- Map inbound memory region. + * @mport: Master port. + * @local: physical address of memory region to be mapped + * @rbase: RIO base address assigned to this window + * @size: Size of the memory region + * @rflags: Flags for mapping. + * + * Return: 0 -- Success. + * + * This function will create the mapping from RIO space to local memory. + */ +int rio_map_inb_region(struct rio_mport *mport, dma_addr_t local, +			u64 rbase, u32 size, u32 rflags) +{ +	int rc = 0; +	unsigned long flags; + +	if (!mport->ops->map_inb) +		return -1; +	spin_lock_irqsave(&rio_mmap_lock, flags); +	rc = mport->ops->map_inb(mport, local, rbase, size, rflags); +	spin_unlock_irqrestore(&rio_mmap_lock, flags); +	return rc; +} +EXPORT_SYMBOL_GPL(rio_map_inb_region); + +/** + * rio_unmap_inb_region -- Unmap the inbound memory region + * @mport: Master port + * @lstart: physical address of memory region to be unmapped + */ +void rio_unmap_inb_region(struct rio_mport *mport, dma_addr_t lstart) +{ +	unsigned long flags; +	if (!mport->ops->unmap_inb) +		return; +	spin_lock_irqsave(&rio_mmap_lock, flags); +	mport->ops->unmap_inb(mport, lstart); +	spin_unlock_irqrestore(&rio_mmap_lock, flags); +} +EXPORT_SYMBOL_GPL(rio_unmap_inb_region); + +/**   * rio_mport_get_physefb - Helper function that returns register offset   *                      for Physical Layer Extended Features Block.   * @port: Master port to issue transaction @@ -430,6 +530,7 @@ rio_mport_get_physefb(struct rio_mport *port, int local,  	return ext_ftr_ptr;  } +EXPORT_SYMBOL_GPL(rio_mport_get_physefb);  /**   * rio_get_comptag - Begin or continue searching for a RIO device by component tag @@ -462,6 +563,7 @@ exit:  	spin_unlock(&rio_global_list_lock);  	return rdev;  } +EXPORT_SYMBOL_GPL(rio_get_comptag);  /**   * rio_set_port_lockout - Sets/clears LOCKOUT bit (RIO EM 1.3) for a switch port. 
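A short usage sketch for the inbound-window pair introduced above (not part of the patch): rio_map_inb_region() routes through the mport's map_inb callback, so it fails when the controller driver provides none. The window base IB_RIO_BASE and the buf_phys/buf_size values below are hypothetical, caller-supplied arguments.

static int demo_open_inb_window(struct rio_mport *mport,
				dma_addr_t buf_phys, u32 buf_size)
{
	int rc;

	/* Expose a local DMA buffer at RIO address IB_RIO_BASE */
	rc = rio_map_inb_region(mport, buf_phys, IB_RIO_BASE, buf_size, 0);
	if (rc)
		return rc;	/* no map_inb callback or mapping failed */

	/* ... remote peers may now NREAD/NWRITE this window ... */

	rio_unmap_inb_region(mport, buf_phys);
	return 0;
}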
@@ -471,16 +573,9 @@ exit:
  */
 int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock)
 {
-	u8 hopcount = 0xff;
-	u16 destid = rdev->destid;
 	u32 regval;
 
-	if (rdev->rswitch) {
-		destid = rdev->rswitch->destid;
-		hopcount = rdev->rswitch->hopcount;
-	}
-
-	rio_mport_read_config_32(rdev->net->hport, destid, hopcount,
+	rio_read_config_32(rdev,
 				 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
 				 &regval);
 	if (lock)
@@ -488,11 +583,74 @@ int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock)
 	else
 		regval &= ~RIO_PORT_N_CTL_LOCKOUT;
 
-	rio_mport_write_config_32(rdev->net->hport, destid, hopcount,
+	rio_write_config_32(rdev,
 				  rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
 				  regval);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(rio_set_port_lockout);
+
+/**
+ * rio_enable_rx_tx_port - enable input receiver and output transmitter of
+ * given port
+ * @port: Master port associated with the RIO network
+ * @local: local=1 select local port otherwise a far device is reached
+ * @destid: Destination ID of the device to check host bit
+ * @hopcount: Number of hops to reach the target
+ * @port_num: Port number to enable on a far-end device (switch port)
+ *
+ * Returns 0 on success, or %-EIO if a maintenance transaction to the
+ * far-end device fails (Port N Control CSR, EXT_PTR+0x3C).
+ */
+int rio_enable_rx_tx_port(struct rio_mport *port,
+			  int local, u16 destid,
+			  u8 hopcount, u8 port_num)
+{
+#ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS
+	u32 regval;
+	u32 ext_ftr_ptr;
+
+	/*
+	 * Enable input receiver and output transmitter of the given port.
+	 */
+	pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = "
		 "%d, port_num = %d)\n", local, destid, hopcount, port_num);
+
+	ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount);
+
+	if (local) {
+		rio_local_read_config_32(port, ext_ftr_ptr +
+				RIO_PORT_N_CTL_CSR(0),
+				&regval);
+	} else {
+		if (rio_mport_read_config_32(port, destid, hopcount,
+		ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), &regval) < 0)
+			return -EIO;
+	}
+
+	if (regval & RIO_PORT_N_CTL_P_TYP_SER) {
+		/* serial */
+		regval = regval | RIO_PORT_N_CTL_EN_RX_SER
+				| RIO_PORT_N_CTL_EN_TX_SER;
+	} else {
+		/* parallel */
+		regval = regval | RIO_PORT_N_CTL_EN_RX_PAR
+				| RIO_PORT_N_CTL_EN_TX_PAR;
+	}
+
+	if (local) {
+		rio_local_write_config_32(port, ext_ftr_ptr +
+					  RIO_PORT_N_CTL_CSR(0), regval);
+	} else {
+		if (rio_mport_write_config_32(port, destid, hopcount,
+		    ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0)
+			return -EIO;
+	}
+#endif
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rio_enable_rx_tx_port);
+
 /**
 * rio_chk_dev_route - Validate route to the specified device.
@@ -507,7 +665,7 @@ static int
 rio_chk_dev_route(struct rio_dev *rdev, struct rio_dev **nrdev, int *npnum)
 {
 	u32 result;
-	int p_port, dstid, rc = -EIO;
+	int p_port, rc = -EIO;
 	struct rio_dev *prev = NULL;
 
 	/* Find switch with failed RIO link */
@@ -522,9 +680,7 @@ rio_chk_dev_route(struct rio_dev *rdev, struct rio_dev **nrdev, int *npnum)
 	if (prev == NULL)
 		goto err_out;
 
-	dstid = (rdev->pef & RIO_PEF_SWITCH) ?
-			rdev->rswitch->destid : rdev->destid;
-	p_port = prev->rswitch->route_table[dstid];
+	p_port = prev->rswitch->route_table[rdev->destid];
 
 	if (p_port != RIO_INVALID_ROUTE) {
 		pr_debug("RIO: link failed on [%s]-P%d\n",
@@ -560,6 +716,7 @@ rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, u8 hopcount)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(rio_mport_chk_dev_access);
 
 /**
 * rio_chk_dev_access - Validate access to the specified device.
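To illustrate the helper above (a sketch under assumptions, not code from the patch): an enumerator could bring up both ends of a link as below. The call is a no-op returning 0 unless CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS is enabled; destid, hopcount and sw_port are illustrative values from the caller's discovery state.

static int demo_enable_link_ends(struct rio_mport *port, u16 destid,
				 u8 hopcount, u8 sw_port)
{
	int rc;

	/* Local end: local=1, the ID/hop/port arguments are ignored */
	rc = rio_enable_rx_tx_port(port, 1, 0, 0, 0);
	if (rc)
		return rc;

	/* Far end, reached through maintenance transactions */
	return rio_enable_rx_tx_port(port, 0, destid, hopcount, sw_port);
}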
@@ -567,15 +724,8 @@ rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, u8 hopcount)
  */
 static int rio_chk_dev_access(struct rio_dev *rdev)
 {
-	u8 hopcount = 0xff;
-	u16 destid = rdev->destid;
-
-	if (rdev->rswitch) {
-		destid = rdev->rswitch->destid;
-		hopcount = rdev->rswitch->hopcount;
-	}
-
-	return rio_mport_chk_dev_access(rdev->net->hport, destid, hopcount);
+	return rio_mport_chk_dev_access(rdev->net->hport,
+					rdev->destid, rdev->hopcount);
 }
 
 /**
@@ -588,23 +738,20 @@ static int rio_chk_dev_access(struct rio_dev *rdev)
 static int
 rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
 {
-	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
 	u32 regval;
 	int checkcount;
 
 	if (lnkresp) {
 		/* Read from link maintenance response register
 		 * to clear valid bit */
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
 			&regval);
 		udelay(50);
 	}
 
 	/* Issue Input-status command */
-	rio_mport_write_config_32(mport, destid, hopcount,
+	rio_write_config_32(rdev,
 		rdev->phys_efptr + RIO_PORT_N_MNT_REQ_CSR(pnum),
 		RIO_MNT_REQ_CMD_IS);
@@ -615,7 +762,7 @@ rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
 	checkcount = 3;
 	while (checkcount--) {
 		udelay(50);
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
 			&regval);
 		if (regval & RIO_PORT_N_MNT_RSP_RVAL) {
@@ -635,15 +782,12 @@ rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
  */
 static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
 {
-	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
 	struct rio_dev *nextdev = rdev->rswitch->nextdev[pnum];
 	u32 regval;
 	u32 far_ackid, far_linkstat, near_ackid;
 
 	if (err_status == 0)
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
 			&err_status);
@@ -661,7 +805,7 @@ static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
 			 pnum, regval);
 		far_ackid = (regval & RIO_PORT_N_MNT_RSP_ASTAT) >> 5;
 		far_linkstat = regval & RIO_PORT_N_MNT_RSP_LSTAT;
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum),
 			&regval);
 		pr_debug("RIO_EM: SP%d_ACK_STS_CSR=0x%08x\n", pnum, regval);
@@ -679,9 +823,8 @@ static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
 			/* Align near outstanding/outbound ackIDs with
 			 * far inbound.
 			 
*/ -			rio_mport_write_config_32(mport, destid, -				hopcount, rdev->phys_efptr + -					RIO_PORT_N_ACK_STS_CSR(pnum), +			rio_write_config_32(rdev, +				rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum),  				(near_ackid << 24) |  					(far_ackid << 8) | far_ackid);  			/* Align far outstanding/outbound ackIDs with @@ -698,7 +841,7 @@ static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)  				pr_debug("RIO_EM: Invalid nextdev pointer (NULL)\n");  		}  rd_err: -		rio_mport_read_config_32(mport, destid, hopcount, +		rio_read_config_32(rdev,  			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),  			&err_status);  		pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status); @@ -710,7 +853,7 @@ rd_err:  				     RIO_GET_PORT_NUM(nextdev->swpinfo), NULL);  		udelay(50); -		rio_mport_read_config_32(mport, destid, hopcount, +		rio_read_config_32(rdev,  			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),  			&err_status);  		pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status); @@ -730,13 +873,10 @@ rd_err:  int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)  {  	struct rio_dev *rdev; -	struct rio_mport *mport; -	u8 hopcount; -	u16 destid;  	u32 err_status, em_perrdet, em_ltlerrdet;  	int rc, portnum; -	rdev = rio_get_comptag(pw_msg->em.comptag, NULL); +	rdev = rio_get_comptag((pw_msg->em.comptag & RIO_CTAG_UDEVID), NULL);  	if (rdev == NULL) {  		/* Device removed or enumeration error */  		pr_debug("RIO: %s No matching device for CTag 0x%08x\n", @@ -800,17 +940,13 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)  		return 0;  	} -	mport = rdev->net->hport; -	destid = rdev->rswitch->destid; -	hopcount = rdev->rswitch->hopcount; -  	/*  	 * Process the port-write notification from switch  	 */ -	if (rdev->rswitch->em_handle) -		rdev->rswitch->em_handle(rdev, portnum); +	if (rdev->rswitch->ops && rdev->rswitch->ops->em_handle) +		rdev->rswitch->ops->em_handle(rdev, portnum); -	rio_mport_read_config_32(mport, destid, hopcount, +	rio_read_config_32(rdev,  			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),  			&err_status);  	pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status); @@ -840,7 +976,7 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)  			rdev->rswitch->port_ok &= ~(1 << portnum);  			rio_set_port_lockout(rdev, portnum, 1); -			rio_mport_write_config_32(mport, destid, hopcount, +			rio_write_config_32(rdev,  				rdev->phys_efptr +  					RIO_PORT_N_ACK_STS_CSR(portnum),  				RIO_PORT_N_ACK_CLEAR); @@ -851,28 +987,28 @@ int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)  		}  	} -	rio_mport_read_config_32(mport, destid, hopcount, +	rio_read_config_32(rdev,  		rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet);  	if (em_perrdet) {  		pr_debug("RIO_PW: RIO_EM_P%d_ERR_DETECT=0x%08x\n",  			 portnum, em_perrdet);  		/* Clear EM Port N Error Detect CSR */ -		rio_mport_write_config_32(mport, destid, hopcount, +		rio_write_config_32(rdev,  			rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), 0);  	} -	rio_mport_read_config_32(mport, destid, hopcount, +	rio_read_config_32(rdev,  		rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet);  	if (em_ltlerrdet) {  		pr_debug("RIO_PW: RIO_EM_LTL_ERR_DETECT=0x%08x\n",  			 em_ltlerrdet);  		/* Clear EM L/T Layer Error Detect CSR */ -		rio_mport_write_config_32(mport, destid, hopcount, +		rio_write_config_32(rdev,  			rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, 0);  	}  	/* Clear remaining error bits and Port-Write Pending bit */ -	rio_mport_write_config_32(mport, destid, hopcount, +	
rio_write_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
 			err_status);
@@ -912,6 +1048,7 @@ rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
 		return RIO_GET_BLOCK_ID(reg_val);
 	}
 }
+EXPORT_SYMBOL_GPL(rio_mport_get_efb);
 
 /**
 * rio_mport_get_feature - query for devices' extended features
@@ -968,6 +1105,7 @@ rio_mport_get_feature(struct rio_mport * port, int local, u16 destid,
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(rio_mport_get_feature);
 
 /**
 * rio_get_asm - Begin or continue searching for a RIO device by vid/did/asm_vid/asm_did
@@ -1042,8 +1180,9 @@ struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from)
 * @route_destid: destID entry in the RT
 * @route_port: destination port for specified destID
 */
-int rio_std_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
-		       u16 table, u16 route_destid, u8 route_port)
+static int
+rio_std_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+			u16 table, u16 route_destid, u8 route_port)
 {
 	if (table == RIO_GLOBAL_TABLE) {
 		rio_mport_write_config_32(mport, destid, hopcount,
@@ -1069,8 +1208,9 @@ int rio_std_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
 * @route_destid: destID entry in the RT
 * @route_port: returned destination port for specified destID
 */
-int rio_std_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
-		       u16 table, u16 route_destid, u8 *route_port)
+static int
+rio_std_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+			u16 table, u16 route_destid, u8 *route_port)
 {
 	u32 result;
@@ -1094,8 +1234,9 @@ int rio_std_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
 * @hopcount: Number of switch hops to the device
 * @table: routing table ID (global or port-specific)
 */
-int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
-		       u16 table)
+static int
+rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
+			u16 table)
 {
 	u32 max_destid = 0xff;
 	u32 i, pef, id_inc = 1, ext_cfg = 0;
@@ -1136,11 +1277,501 @@ int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
 	return 0;
 }
 
+/**
+ * rio_lock_device - Acquires host device lock for specified device
+ * @port: Master port to send transaction
+ * @destid: Destination ID for device/switch
+ * @hopcount: Hopcount to reach switch
+ * @wait_ms: Max wait time in msec (0 = no timeout)
+ *
+ * Attempts to acquire the host device lock for the specified device.
+ * Returns 0 if the device lock was acquired or %-EINVAL if the timeout expires.
+ */
+int rio_lock_device(struct rio_mport *port, u16 destid,
+		    u8 hopcount, int wait_ms)
+{
+	u32 result;
+	int tcnt = 0;
+
+	/* Attempt to acquire device lock */
+	rio_mport_write_config_32(port, destid, hopcount,
+				  RIO_HOST_DID_LOCK_CSR, port->host_deviceid);
+	rio_mport_read_config_32(port, destid, hopcount,
+				 RIO_HOST_DID_LOCK_CSR, &result);
+
+	while (result != port->host_deviceid) {
+		if (wait_ms != 0 && tcnt == wait_ms) {
+			pr_debug("RIO: timeout when locking device %x:%x\n",
				destid, hopcount);
+			return -EINVAL;
+		}
+
+		/* Delay a bit */
+		mdelay(1);
+		tcnt++;
+		/* Try to acquire device lock again */
+		rio_mport_write_config_32(port, destid,
+			hopcount,
+			RIO_HOST_DID_LOCK_CSR,
+			port->host_deviceid);
+		rio_mport_read_config_32(port, destid,
+			hopcount,
+			RIO_HOST_DID_LOCK_CSR, &result);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rio_lock_device);
+
+/**
+ * rio_unlock_device - Releases host device lock for specified device
+ * @port: Master port to send transaction
+ * @destid: Destination ID for device/switch
+ * @hopcount: Hopcount to reach switch
+ *
+ * Returns 0 if the device lock was released or %-EINVAL on failure.
+ */
+int rio_unlock_device(struct rio_mport *port, u16 destid, u8 hopcount)
+{
+	u32 result;
+
+	/* Release device lock */
+	rio_mport_write_config_32(port, destid,
+				  hopcount,
+				  RIO_HOST_DID_LOCK_CSR,
+				  port->host_deviceid);
+	rio_mport_read_config_32(port, destid, hopcount,
+		RIO_HOST_DID_LOCK_CSR, &result);
+	if ((result & 0xffff) != 0xffff) {
+		pr_debug("RIO: badness when releasing device lock %x:%x\n",
+			 destid, hopcount);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rio_unlock_device);
+
+/**
+ * rio_route_add_entry - Add a route entry to a switch routing table
+ * @rdev: RIO device
+ * @table: Routing table ID
+ * @route_destid: Destination ID to be routed
+ * @route_port: Port number to be routed
+ * @lock: apply a hardware lock on switch device flag (1=lock, 0=no_lock)
+ *
+ * If available, calls the switch-specific add_entry() method to add a route
+ * entry into a switch routing table. Otherwise uses the standard RT update
+ * method as defined by the RapidIO specification. A specific routing table
+ * can be selected using the @table argument if a switch has per-port routing
+ * tables, or the standard (or global) table may be used by passing
+ * %RIO_GLOBAL_TABLE in @table.
+ *
+ * Returns %0 on success or %-EINVAL on failure.
+ */
+int rio_route_add_entry(struct rio_dev *rdev,
+			u16 table, u16 route_destid, u8 route_port, int lock)
+{
+	int rc = -EINVAL;
+	struct rio_switch_ops *ops = rdev->rswitch->ops;
+
+	if (lock) {
+		rc = rio_lock_device(rdev->net->hport, rdev->destid,
+				     rdev->hopcount, 1000);
+		if (rc)
+			return rc;
+	}
+
+	spin_lock(&rdev->rswitch->lock);
+
+	if (ops == NULL || ops->add_entry == NULL) {
+		rc = rio_std_route_add_entry(rdev->net->hport, rdev->destid,
+					     rdev->hopcount, table,
+					     route_destid, route_port);
+	} else if (try_module_get(ops->owner)) {
+		rc = ops->add_entry(rdev->net->hport, rdev->destid,
+				    rdev->hopcount, table, route_destid,
+				    route_port);
+		module_put(ops->owner);
+	}
+
+	spin_unlock(&rdev->rswitch->lock);
+
+	if (lock)
+		rio_unlock_device(rdev->net->hport, rdev->destid,
+				  rdev->hopcount);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(rio_route_add_entry);
+
+/**
+ * rio_route_get_entry - Read an entry from a switch routing table
+ * @rdev: RIO device
+ * @table: Routing table ID
+ * @route_destid: Destination ID to be routed
+ * @route_port: Pointer to read port number into
+ * @lock: apply a hardware lock on switch device flag (1=lock, 0=no_lock)
+ *
+ * If available, calls the switch-specific get_entry() method to fetch a route
+ * entry from a switch routing table. Otherwise uses the standard RT read
+ * method as defined by the RapidIO specification. A specific routing table
+ * can be selected using the @table argument if a switch has per-port routing
+ * tables, or the standard (or global) table may be used by passing
+ * %RIO_GLOBAL_TABLE in @table.
+ *
+ * Returns %0 on success or %-EINVAL on failure.
+ */
+int rio_route_get_entry(struct rio_dev *rdev, u16 table,
+			u16 route_destid, u8 *route_port, int lock)
+{
+	int rc = -EINVAL;
+	struct rio_switch_ops *ops = rdev->rswitch->ops;
+
+	if (lock) {
+		rc = rio_lock_device(rdev->net->hport, rdev->destid,
+				     rdev->hopcount, 1000);
+		if (rc)
+			return rc;
+	}
+
+	spin_lock(&rdev->rswitch->lock);
+
+	if (ops == NULL || ops->get_entry == NULL) {
+		rc = rio_std_route_get_entry(rdev->net->hport, rdev->destid,
+					     rdev->hopcount, table,
+					     route_destid, route_port);
+	} else if (try_module_get(ops->owner)) {
+		rc = ops->get_entry(rdev->net->hport, rdev->destid,
+				    rdev->hopcount, table, route_destid,
+				    route_port);
+		module_put(ops->owner);
+	}
+
+	spin_unlock(&rdev->rswitch->lock);
+
+	if (lock)
+		rio_unlock_device(rdev->net->hport, rdev->destid,
+				  rdev->hopcount);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(rio_route_get_entry);
+
+/**
+ * rio_route_clr_table - Clear a switch routing table
+ * @rdev: RIO device
+ * @table: Routing table ID
+ * @lock: apply a hardware lock on switch device flag (1=lock, 0=no_lock)
+ *
+ * If available, calls the switch-specific clr_table() method to clear a switch
+ * routing table. Otherwise uses the standard RT write method as defined by the
+ * RapidIO specification. A specific routing table can be selected using the
+ * @table argument if a switch has per-port routing tables, or the standard
+ * (or global) table may be used by passing %RIO_GLOBAL_TABLE in @table.
+ *
+ * Returns %0 on success or %-EINVAL on failure.
+ */ +int rio_route_clr_table(struct rio_dev *rdev, u16 table, int lock) +{ +	int rc = -EINVAL; +	struct rio_switch_ops *ops = rdev->rswitch->ops; + +	if (lock) { +		rc = rio_lock_device(rdev->net->hport, rdev->destid, +				     rdev->hopcount, 1000); +		if (rc) +			return rc; +	} + +	spin_lock(&rdev->rswitch->lock); + +	if (ops == NULL || ops->clr_table == NULL) { +		rc = rio_std_route_clr_table(rdev->net->hport, rdev->destid, +					     rdev->hopcount, table); +	} else if (try_module_get(ops->owner)) { +		rc = ops->clr_table(rdev->net->hport, rdev->destid, +				    rdev->hopcount, table); + +		module_put(ops->owner); +	} + +	spin_unlock(&rdev->rswitch->lock); + +	if (lock) +		rio_unlock_device(rdev->net->hport, rdev->destid, +				  rdev->hopcount); + +	return rc; +} +EXPORT_SYMBOL_GPL(rio_route_clr_table); + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + +static bool rio_chan_filter(struct dma_chan *chan, void *arg) +{ +	struct rio_dev *rdev = arg; + +	/* Check that DMA device belongs to the right MPORT */ +	return (rdev->net->hport == +		container_of(chan->device, struct rio_mport, dma)); +} + +/** + * rio_request_dma - request RapidIO capable DMA channel that supports + *   specified target RapidIO device. + * @rdev: RIO device control structure + * + * Returns pointer to allocated DMA channel or NULL if failed. + */ +struct dma_chan *rio_request_dma(struct rio_dev *rdev) +{ +	dma_cap_mask_t mask; +	struct dma_chan *dchan; + +	dma_cap_zero(mask); +	dma_cap_set(DMA_SLAVE, mask); +	dchan = dma_request_channel(mask, rio_chan_filter, rdev); + +	return dchan; +} +EXPORT_SYMBOL_GPL(rio_request_dma); + +/** + * rio_release_dma - release specified DMA channel + * @dchan: DMA channel to release + */ +void rio_release_dma(struct dma_chan *dchan) +{ +	dma_release_channel(dchan); +} +EXPORT_SYMBOL_GPL(rio_release_dma); + +/** + * rio_dma_prep_slave_sg - RapidIO specific wrapper + *   for device_prep_slave_sg callback defined by DMAENGINE. + * @rdev: RIO device control structure + * @dchan: DMA channel to configure + * @data: RIO specific data descriptor + * @direction: DMA data transfer direction (TO or FROM the device) + * @flags: dmaengine defined flags + * + * Initializes RapidIO capable DMA channel for the specified data transfer. + * Uses DMA channel private extension to pass information related to remote + * target RIO device. + * Returns pointer to DMA transaction descriptor or NULL if failed. + */ +struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev, +	struct dma_chan *dchan, struct rio_dma_data *data, +	enum dma_transfer_direction direction, unsigned long flags) +{ +	struct dma_async_tx_descriptor *txd = NULL; +	struct rio_dma_ext rio_ext; + +	if (dchan->device->device_prep_slave_sg == NULL) { +		pr_err("%s: prep_rio_sg == NULL\n", __func__); +		return NULL; +	} + +	rio_ext.destid = rdev->destid; +	rio_ext.rio_addr_u = data->rio_addr_u; +	rio_ext.rio_addr = data->rio_addr; +	rio_ext.wr_type = data->wr_type; + +	txd = dmaengine_prep_rio_sg(dchan, data->sg, data->sg_len, +					direction, flags, &rio_ext); + +	return txd; +} +EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg); + +#endif /* CONFIG_RAPIDIO_DMA_ENGINE */ + +/** + * rio_find_mport - find RIO mport by its ID + * @mport_id: number (ID) of mport device + * + * Given a RIO mport number, the desired mport is located + * in the global list of mports. If the mport is found, a pointer to its + * data structure is returned.  If no mport is found, %NULL is returned. 
+ */ +struct rio_mport *rio_find_mport(int mport_id) +{ +	struct rio_mport *port; + +	mutex_lock(&rio_mport_list_lock); +	list_for_each_entry(port, &rio_mports, node) { +		if (port->id == mport_id) +			goto found; +	} +	port = NULL; +found: +	mutex_unlock(&rio_mport_list_lock); + +	return port; +} + +/** + * rio_register_scan - enumeration/discovery method registration interface + * @mport_id: mport device ID for which fabric scan routine has to be set + *            (RIO_MPORT_ANY = set for all available mports) + * @scan_ops: enumeration/discovery operations structure + * + * Registers enumeration/discovery operations with RapidIO subsystem and + * attaches it to the specified mport device (or all available mports + * if RIO_MPORT_ANY is specified). + * + * Returns error if the mport already has an enumerator attached to it. + * In case of RIO_MPORT_ANY skips mports with valid scan routines (no error). + */ +int rio_register_scan(int mport_id, struct rio_scan *scan_ops) +{ +	struct rio_mport *port; +	struct rio_scan_node *scan; +	int rc = 0; + +	pr_debug("RIO: %s for mport_id=%d\n", __func__, mport_id); + +	if ((mport_id != RIO_MPORT_ANY && mport_id >= RIO_MAX_MPORTS) || +	    !scan_ops) +		return -EINVAL; + +	mutex_lock(&rio_mport_list_lock); + +	/* +	 * Check if there is another enumerator already registered for +	 * the same mport ID (including RIO_MPORT_ANY). Multiple enumerators +	 * for the same mport ID are not supported. +	 */ +	list_for_each_entry(scan, &rio_scans, node) { +		if (scan->mport_id == mport_id) { +			rc = -EBUSY; +			goto err_out; +		} +	} + +	/* +	 * Allocate and initialize new scan registration node. +	 */ +	scan = kzalloc(sizeof(*scan), GFP_KERNEL); +	if (!scan) { +		rc = -ENOMEM; +		goto err_out; +	} + +	scan->mport_id = mport_id; +	scan->ops = scan_ops; + +	/* +	 * Traverse the list of registered mports to attach this new scan. +	 * +	 * The new scan with matching mport ID overrides any previously attached +	 * scan assuming that old scan (if any) is the default one (based on the +	 * enumerator registration check above). +	 * If the new scan is the global one, it will be attached only to mports +	 * that do not have their own individual operations already attached. +	 */ +	list_for_each_entry(port, &rio_mports, node) { +		if (port->id == mport_id) { +			port->nscan = scan_ops; +			break; +		} else if (mport_id == RIO_MPORT_ANY && !port->nscan) +			port->nscan = scan_ops; +	} + +	list_add_tail(&scan->node, &rio_scans); + +err_out: +	mutex_unlock(&rio_mport_list_lock); + +	return rc; +} +EXPORT_SYMBOL_GPL(rio_register_scan); + +/** + * rio_unregister_scan - removes enumeration/discovery method from mport + * @mport_id: mport device ID for which fabric scan routine has to be + *            unregistered (RIO_MPORT_ANY = apply to all mports that use + *            the specified scan_ops) + * @scan_ops: enumeration/discovery operations structure + * + * Removes enumeration or discovery method assigned to the specified mport + * device. If RIO_MPORT_ANY is specified, removes the specified operations from + * all mports that have them attached. 
+ */ +int rio_unregister_scan(int mport_id, struct rio_scan *scan_ops) +{ +	struct rio_mport *port; +	struct rio_scan_node *scan; + +	pr_debug("RIO: %s for mport_id=%d\n", __func__, mport_id); + +	if (mport_id != RIO_MPORT_ANY && mport_id >= RIO_MAX_MPORTS) +		return -EINVAL; + +	mutex_lock(&rio_mport_list_lock); + +	list_for_each_entry(port, &rio_mports, node) +		if (port->id == mport_id || +		    (mport_id == RIO_MPORT_ANY && port->nscan == scan_ops)) +			port->nscan = NULL; + +	list_for_each_entry(scan, &rio_scans, node) { +		if (scan->mport_id == mport_id) { +			list_del(&scan->node); +			kfree(scan); +			break; +		} +	} + +	mutex_unlock(&rio_mport_list_lock); + +	return 0; +} +EXPORT_SYMBOL_GPL(rio_unregister_scan); + +/** + * rio_mport_scan - execute enumeration/discovery on the specified mport + * @mport_id: number (ID) of mport device + */ +int rio_mport_scan(int mport_id) +{ +	struct rio_mport *port = NULL; +	int rc; + +	mutex_lock(&rio_mport_list_lock); +	list_for_each_entry(port, &rio_mports, node) { +		if (port->id == mport_id) +			goto found; +	} +	mutex_unlock(&rio_mport_list_lock); +	return -ENODEV; +found: +	if (!port->nscan) { +		mutex_unlock(&rio_mport_list_lock); +		return -EINVAL; +	} + +	if (!try_module_get(port->nscan->owner)) { +		mutex_unlock(&rio_mport_list_lock); +		return -ENODEV; +	} + +	mutex_unlock(&rio_mport_list_lock); + +	if (port->host_deviceid >= 0) +		rc = port->nscan->enumerate(port, 0); +	else +		rc = port->nscan->discover(port, RIO_SCAN_ENUM_NO_WAIT); + +	module_put(port->nscan->owner); +	return rc; +} +  static void rio_fixup_device(struct rio_dev *dev)  {  } -static int __devinit rio_init(void) +static int rio_init(void)  {  	struct rio_dev *dev = NULL; @@ -1150,38 +1781,151 @@ static int __devinit rio_init(void)  	return 0;  } -device_initcall(rio_init); +static struct workqueue_struct *rio_wq; + +struct rio_disc_work { +	struct work_struct	work; +	struct rio_mport	*mport; +}; -int __devinit rio_init_mports(void) +static void disc_work_handler(struct work_struct *_work) +{ +	struct rio_disc_work *work; + +	work = container_of(_work, struct rio_disc_work, work); +	pr_debug("RIO: discovery work for mport %d %s\n", +		 work->mport->id, work->mport->name); +	if (try_module_get(work->mport->nscan->owner)) { +		work->mport->nscan->discover(work->mport, 0); +		module_put(work->mport->nscan->owner); +	} +} + +int rio_init_mports(void)  { -	int rc = 0;  	struct rio_mport *port; +	struct rio_disc_work *work; +	int n = 0; + +	if (!next_portid) +		return -ENODEV; +	/* +	 * First, run enumerations and check if we need to perform discovery +	 * on any of the registered mports. +	 */ +	mutex_lock(&rio_mport_list_lock);  	list_for_each_entry(port, &rio_mports, node) { -		if (!request_mem_region(port->iores.start, -					resource_size(&port->iores), -					port->name)) { -			printk(KERN_ERR -			       "RIO: Error requesting master port region 0x%016llx-0x%016llx\n", -			       (u64)port->iores.start, (u64)port->iores.end); -			rc = -ENOMEM; -			goto out; -		} +		if (port->host_deviceid >= 0) { +			if (port->nscan && try_module_get(port->nscan->owner)) { +				port->nscan->enumerate(port, 0); +				module_put(port->nscan->owner); +			} +		} else +			n++; +	} +	mutex_unlock(&rio_mport_list_lock); -		if (port->host_deviceid >= 0) -			rio_enum_mport(port); -		else -			rio_disc_mport(port); +	if (!n) +		goto no_disc; + +	/* +	 * If we have mports that require discovery schedule a discovery work +	 * for each of them. 
If the code below fails to allocate needed
+	 * resources, exit without error to keep results of enumeration
+	 * process (if any).
+	 * TODO: Implement restart of discovery process for all or
+	 * individual discovering mports.
+	 */
+	rio_wq = alloc_workqueue("riodisc", 0, 0);
+	if (!rio_wq) {
+		pr_err("RIO: unable to allocate rio_wq\n");
+		goto no_disc;
 	}
-      out:
-	return rc;
+	work = kcalloc(n, sizeof *work, GFP_KERNEL);
+	if (!work) {
+		pr_err("RIO: no memory for work struct\n");
+		destroy_workqueue(rio_wq);
+		goto no_disc;
+	}
+
+	n = 0;
+	mutex_lock(&rio_mport_list_lock);
+	list_for_each_entry(port, &rio_mports, node) {
+		if (port->host_deviceid < 0 && port->nscan) {
+			work[n].mport = port;
+			INIT_WORK(&work[n].work, disc_work_handler);
+			queue_work(rio_wq, &work[n].work);
+			n++;
+		}
+	}
+
+	flush_workqueue(rio_wq);
+	mutex_unlock(&rio_mport_list_lock);
+	pr_debug("RIO: destroy discovery workqueue\n");
+	destroy_workqueue(rio_wq);
+	kfree(work);
+
+no_disc:
+	rio_init();
+
+	return 0;
+}
+
+static int rio_get_hdid(int index)
+{
+	if (ids_num == 0 || ids_num <= index || index >= RIO_MAX_MPORTS)
+		return -1;
+
+	return hdid[index];
 }
 
-void rio_register_mport(struct rio_mport *port)
+int rio_register_mport(struct rio_mport *port)
 {
+	struct rio_scan_node *scan = NULL;
+	int res = 0;
+
+	if (next_portid >= RIO_MAX_MPORTS) {
+		pr_err("RIO: reached specified max number of mports\n");
+		return 1;
+	}
+
+	port->id = next_portid++;
+	port->host_deviceid = rio_get_hdid(port->id);
+	port->nscan = NULL;
+
+	dev_set_name(&port->dev, "rapidio%d", port->id);
+	port->dev.class = &rio_mport_class;
+
+	res = device_register(&port->dev);
+	if (res)
+		dev_err(&port->dev, "RIO: mport%d registration failed ERR=%d\n",
			port->id, res);
+	else
+		dev_dbg(&port->dev, "RIO: mport%d registered\n", port->id);
+
+	mutex_lock(&rio_mport_list_lock);
 	list_add_tail(&port->node, &rio_mports);
+
+	/*
+	 * Check if there are any registered enumeration/discovery operations
+	 * that have to be attached to the added mport. 
+	 */ +	list_for_each_entry(scan, &rio_scans, node) { +		if (port->id == scan->mport_id || +		    scan->mport_id == RIO_MPORT_ANY) { +			port->nscan = scan->ops; +			if (port->id == scan->mport_id) +				break; +		} +	} +	mutex_unlock(&rio_mport_list_lock); + +	pr_debug("RIO: %s %s id=%d\n", __func__, port->name, port->id); +	return 0;  } +EXPORT_SYMBOL_GPL(rio_register_mport);  EXPORT_SYMBOL_GPL(rio_local_get_device_id);  EXPORT_SYMBOL_GPL(rio_get_device); @@ -1194,3 +1938,4 @@ EXPORT_SYMBOL_GPL(rio_request_inb_mbox);  EXPORT_SYMBOL_GPL(rio_release_inb_mbox);  EXPORT_SYMBOL_GPL(rio_request_outb_mbox);  EXPORT_SYMBOL_GPL(rio_release_outb_mbox); +EXPORT_SYMBOL_GPL(rio_init_mports); diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h index b1af414f15e..2d0550e08ea 100644 --- a/drivers/rapidio/rio.h +++ b/drivers/rapidio/rio.h @@ -15,6 +15,7 @@  #include <linux/rio.h>  #define RIO_MAX_CHK_RETRY	3 +#define RIO_MPORT_ANY		(-1)  /* Functions internal to the RIO core code */ @@ -27,46 +28,29 @@ extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,  extern int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid,  				    u8 hopcount);  extern int rio_create_sysfs_dev_files(struct rio_dev *rdev); -extern int rio_enum_mport(struct rio_mport *mport); -extern int rio_disc_mport(struct rio_mport *mport); -extern int rio_std_route_add_entry(struct rio_mport *mport, u16 destid, -				   u8 hopcount, u16 table, u16 route_destid, -				   u8 route_port); -extern int rio_std_route_get_entry(struct rio_mport *mport, u16 destid, -				   u8 hopcount, u16 table, u16 route_destid, -				   u8 *route_port); -extern int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, -				   u8 hopcount, u16 table); +extern int rio_lock_device(struct rio_mport *port, u16 destid, +			u8 hopcount, int wait_ms); +extern int rio_unlock_device(struct rio_mport *port, u16 destid, u8 hopcount); +extern int rio_route_add_entry(struct rio_dev *rdev, +			u16 table, u16 route_destid, u8 route_port, int lock); +extern int rio_route_get_entry(struct rio_dev *rdev, u16 table, +			u16 route_destid, u8 *route_port, int lock); +extern int rio_route_clr_table(struct rio_dev *rdev, u16 table, int lock);  extern int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock);  extern struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from); +extern int rio_add_device(struct rio_dev *rdev); +extern int rio_enable_rx_tx_port(struct rio_mport *port, int local, u16 destid, +				 u8 hopcount, u8 port_num); +extern int rio_register_scan(int mport_id, struct rio_scan *scan_ops); +extern int rio_unregister_scan(int mport_id, struct rio_scan *scan_ops); +extern void rio_attach_device(struct rio_dev *rdev); +extern struct rio_mport *rio_find_mport(int mport_id); +extern int rio_mport_scan(int mport_id);  /* Structures internal to the RIO core code */ -extern struct device_attribute rio_dev_attrs[]; -extern spinlock_t rio_global_list_lock; - -extern struct rio_switch_ops __start_rio_switch_ops[]; -extern struct rio_switch_ops __end_rio_switch_ops[]; - -/* Helpers internal to the RIO core code */ -#define DECLARE_RIO_SWITCH_SECTION(section, name, vid, did, init_hook) \ -	static const struct rio_switch_ops __rio_switch_##name __used \ -	__section(section) = { vid, did, init_hook }; - -/** - * DECLARE_RIO_SWITCH_INIT - Registers switch initialization routine - * @vid: RIO vendor ID - * @did: RIO device ID - * @init_hook: Callback that performs switch-specific initialization - * - * Manipulating switch 
route tables and error management in RIO
- * is switch specific. This registers a switch by vendor and device ID with
- * initialization callback for setting up switch operations and (if required)
- * hardware initialization. A &struct rio_switch_ops is initialized with
- * pointer to the init routine and placed into a RIO-specific kernel section.
- */
-#define DECLARE_RIO_SWITCH_INIT(vid, did, init_hook)		\
-	DECLARE_RIO_SWITCH_SECTION(.rio_switch_ops, vid##did, \
-			vid, did, init_hook)
+extern const struct attribute_group *rio_dev_groups[];
+extern const struct attribute_group *rio_bus_groups[];
+extern const struct attribute_group *rio_mport_groups[];
 
 #define RIO_GET_DID(size, x)	(size ? (x & 0xffff) : ((x & 0x00ff0000) >> 16))
 #define RIO_SET_DID(size, x)	(size ? (x & 0xffff) : ((x & 0x000000ff) << 16))
diff --git a/drivers/rapidio/switches/Kconfig b/drivers/rapidio/switches/Kconfig
index f47fee5d456..345841562f9 100644
--- a/drivers/rapidio/switches/Kconfig
+++ b/drivers/rapidio/switches/Kconfig
@@ -2,34 +2,23 @@
 # RapidIO switches configuration
 #
 config RAPIDIO_TSI57X
-	bool "IDT Tsi57x SRIO switches support"
-	depends on RAPIDIO
+	tristate "IDT Tsi57x SRIO switches support"
 	---help---
 	  Includes support for IDT Tsi57x family of serial RapidIO switches.
 
 config RAPIDIO_CPS_XX
-	bool "IDT CPS-xx SRIO switches support"
-	depends on RAPIDIO
+	tristate "IDT CPS-xx SRIO switches support"
 	---help---
 	  Includes support for IDT CPS-16/12/10/8 serial RapidIO switches.
 
 config RAPIDIO_TSI568
-	bool "Tsi568 SRIO switch support"
-	depends on RAPIDIO
+	tristate "Tsi568 SRIO switch support"
 	default n
 	---help---
 	  Includes support for IDT Tsi568 serial RapidIO switch.
 
 config RAPIDIO_CPS_GEN2
-	bool "IDT CPS Gen.2 SRIO switch support"
-	depends on RAPIDIO
+	tristate "IDT CPS Gen.2 SRIO switch support"
 	default n
 	---help---
 	  Includes support for IDT CPS Gen.2 serial RapidIO switches.
-
-config RAPIDIO_TSI500
-	bool "Tsi500 Parallel RapidIO switch support"
-	depends on RAPIDIO
-	default n
-	---help---
-	  Includes support for IDT Tsi500 parallel RapidIO switch.
diff --git a/drivers/rapidio/switches/Makefile b/drivers/rapidio/switches/Makefile
index 48d67a6b98c..051cc6b3818 100644
--- a/drivers/rapidio/switches/Makefile
+++ b/drivers/rapidio/switches/Makefile
@@ -5,9 +5,4 @@
 obj-$(CONFIG_RAPIDIO_TSI57X)	+= tsi57x.o
 obj-$(CONFIG_RAPIDIO_CPS_XX)	+= idtcps.o
 obj-$(CONFIG_RAPIDIO_TSI568)	+= tsi568.o
-obj-$(CONFIG_RAPIDIO_TSI500)	+= tsi500.o
 obj-$(CONFIG_RAPIDIO_CPS_GEN2)	+= idt_gen2.o
-
-ifeq ($(CONFIG_RAPIDIO_DEBUG),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c
index 0bb871cb5c4..9f7fe21580b 100644
--- a/drivers/rapidio/switches/idt_gen2.c
+++ b/drivers/rapidio/switches/idt_gen2.c
@@ -10,10 +10,14 @@
 * option) any later version.
  
*/
+#include <linux/stat.h>
+#include <linux/module.h>
 #include <linux/rio.h>
 #include <linux/rio_drv.h>
 #include <linux/rio_ids.h>
 #include <linux/delay.h>
+
+#include <asm/page.h>
 #include "../rio.h"
 
 #define LOCAL_RTE_CONF_DESTID_SEL	0x010070
@@ -95,6 +99,9 @@ idtg2_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
 	else
 		table++;
+
+	if (route_port == RIO_INVALID_ROUTE)
+		route_port = IDT_DEFAULT_ROUTE;
+
 	rio_mport_write_config_32(mport, destid, hopcount,
 				  LOCAL_RTE_CONF_DESTID_SEL, table);
@@ -209,9 +216,6 @@ idtg2_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
 static int
 idtg2_em_init(struct rio_dev *rdev)
 {
-	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
 	u32 regval;
 	int i, tmp;
@@ -220,29 +224,25 @@ idtg2_em_init(struct rio_dev *rdev)
 	 * All standard EM configuration should be performed at upper level.
 	 */
 
-	pr_debug("RIO: %s [%d:%d]\n", __func__, destid, hopcount);
+	pr_debug("RIO: %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount);
 
 	/* Set Port-Write info CSR: PRIO=3 and CRF=1 */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_PW_INFO_CSR, 0x0000e000);
+	rio_write_config_32(rdev, IDT_PW_INFO_CSR, 0x0000e000);
 
 	/*
 	 * Configure LT LAYER error reporting.
 	 */
 
 	/* Enable standard (RIO.p8) error reporting */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_LT_ERR_REPORT_EN,
+	rio_write_config_32(rdev, IDT_LT_ERR_REPORT_EN,
 			REM_LTL_ERR_ILLTRAN | REM_LTL_ERR_UNSOLR |
 			REM_LTL_ERR_UNSUPTR);
 
 	/* Use Port-Writes for LT layer error reporting.
 	 * Enable per-port reset
 	 */
-	rio_mport_read_config_32(mport, destid, hopcount,
-			IDT_DEV_CTRL_1, &regval);
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_DEV_CTRL_1,
+	rio_read_config_32(rdev, IDT_DEV_CTRL_1, &regval);
+	rio_write_config_32(rdev, IDT_DEV_CTRL_1,
 			regval | IDT_DEV_CTRL_1_GENPW | IDT_DEV_CTRL_1_PRSTBEH);
 
 	/*
@@ -250,45 +250,40 @@ idtg2_em_init(struct rio_dev *rdev)
 	 */
 
 	/* Report all RIO.p8 errors supported by device */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_PORT_ERR_REPORT_EN_BC, 0x807e8037);
+	rio_write_config_32(rdev, IDT_PORT_ERR_REPORT_EN_BC, 0x807e8037);
 
 	/* Configure reporting of implementation specific errors/events */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_PORT_ISERR_REPORT_EN_BC, IDT_PORT_INIT_TX_ACQUIRED);
+	rio_write_config_32(rdev, IDT_PORT_ISERR_REPORT_EN_BC,
+			    IDT_PORT_INIT_TX_ACQUIRED);
 
 	/* Use Port-Writes for port error reporting and enable error logging */
 	tmp = RIO_GET_TOTAL_PORTS(rdev->swpinfo);
 	for (i = 0; i < tmp; i++) {
-		rio_mport_read_config_32(mport, destid, hopcount,
-				IDT_PORT_OPS(i), &regval);
-		rio_mport_write_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev, IDT_PORT_OPS(i), &regval);
+		rio_write_config_32(rdev,
 				IDT_PORT_OPS(i), regval | IDT_PORT_OPS_GENPW |
 				IDT_PORT_OPS_PL_ELOG |
 				IDT_PORT_OPS_LL_ELOG |
 				IDT_PORT_OPS_LT_ELOG);
 	}
 	/* Overwrite error log if full */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_ERR_CAP, IDT_ERR_CAP_LOG_OVERWR);
+	rio_write_config_32(rdev, IDT_ERR_CAP, IDT_ERR_CAP_LOG_OVERWR);
 
 	/*
 	 * Configure LANE error reporting.
	 
*/
 
 	/* Disable line error reporting */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_LANE_ERR_REPORT_EN_BC, 0);
+	rio_write_config_32(rdev, IDT_LANE_ERR_REPORT_EN_BC, 0);
 
 	/* Use Port-Writes for lane error reporting (when enabled)
 	 * (do per-lane update because lanes may have different configuration)
 	 */
 	tmp = (rdev->did == RIO_DID_IDTCPS1848) ? 48 : 16;
 	for (i = 0; i < tmp; i++) {
-		rio_mport_read_config_32(mport, destid, hopcount,
-				IDT_LANE_CTRL(i), &regval);
-		rio_mport_write_config_32(mport, destid, hopcount,
-				IDT_LANE_CTRL(i), regval | IDT_LANE_CTRL_GENPW);
+		rio_read_config_32(rdev, IDT_LANE_CTRL(i), &regval);
+		rio_write_config_32(rdev, IDT_LANE_CTRL(i),
+				    regval | IDT_LANE_CTRL_GENPW);
 	}
 
 	/*
@@ -296,41 +291,32 @@ idtg2_em_init(struct rio_dev *rdev)
 	 */
 
 	/* Disable JTAG and I2C Error capture */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_AUX_PORT_ERR_CAP_EN, 0);
+	rio_write_config_32(rdev, IDT_AUX_PORT_ERR_CAP_EN, 0);
 
 	/* Disable JTAG and I2C Error reporting/logging */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_AUX_ERR_REPORT_EN, 0);
+	rio_write_config_32(rdev, IDT_AUX_ERR_REPORT_EN, 0);
 
 	/* Disable Port-Write notification from JTAG */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_JTAG_CTRL, 0);
+	rio_write_config_32(rdev, IDT_JTAG_CTRL, 0);
 
 	/* Disable Port-Write notification from I2C */
-	rio_mport_read_config_32(mport, destid, hopcount,
-			IDT_I2C_MCTRL, &regval);
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_I2C_MCTRL,
-			regval & ~IDT_I2C_MCTRL_GENPW);
+	rio_read_config_32(rdev, IDT_I2C_MCTRL, &regval);
+	rio_write_config_32(rdev, IDT_I2C_MCTRL, regval & ~IDT_I2C_MCTRL_GENPW);
 
 	/*
 	 * Configure CFG_BLK error reporting.
 	 */
 
 	/* Disable Configuration Block error capture */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_CFGBLK_ERR_CAPTURE_EN, 0);
+	rio_write_config_32(rdev, IDT_CFGBLK_ERR_CAPTURE_EN, 0);
 
 	/* Disable Port-Writes for Configuration Block error reporting */
-	rio_mport_read_config_32(mport, destid, hopcount,
-			IDT_CFGBLK_ERR_REPORT, &regval);
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_CFGBLK_ERR_REPORT,
-			regval & ~IDT_CFGBLK_ERR_REPORT_GENPW);
+	rio_read_config_32(rdev, IDT_CFGBLK_ERR_REPORT, &regval);
+	rio_write_config_32(rdev, IDT_CFGBLK_ERR_REPORT,
+			    regval & ~IDT_CFGBLK_ERR_REPORT_GENPW);
 
 	/* set TVAL = ~50us */
-	rio_mport_write_config_32(mport, destid, hopcount,
+	rio_write_config_32(rdev,
		rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8);
 
 	return 0;
@@ -339,18 +325,15 @@ idtg2_em_init(struct rio_dev *rdev)
 static int
 idtg2_em_handler(struct rio_dev *rdev, u8 portnum)
 {
-	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
 	u32 regval, em_perrdet, em_ltlerrdet;
 
-	rio_mport_read_config_32(mport, destid, hopcount,
+	rio_read_config_32(rdev,
		rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet);
 	if (em_ltlerrdet) {
 		/* Service Logical/Transport Layer Error(s) */
 		if (em_ltlerrdet & REM_LTL_ERR_IMPSPEC) {
 			/* Implementation specific error reported */
-			rio_mport_read_config_32(mport, destid, hopcount,
+			rio_read_config_32(rdev,
					IDT_ISLTL_ADDRESS_CAP, &regval);
 
 			pr_debug("RIO: %s Implementation Specific LTL errors" \
@@ -358,13 +341,12 @@ idtg2_em_handler(struct rio_dev *rdev, u8 portnum)
				 rio_name(rdev), em_ltlerrdet, regval);
 
 			/* Clear implementation specific address capture CSR */
-			
rio_mport_write_config_32(mport, destid, hopcount,
-					IDT_ISLTL_ADDRESS_CAP, 0);
+			rio_write_config_32(rdev, IDT_ISLTL_ADDRESS_CAP, 0);
 
 		}
 	}
 
-	rio_mport_read_config_32(mport, destid, hopcount,
+	rio_read_config_32(rdev,
		rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet);
 	if (em_perrdet) {
 		/* Service Port-Level Error(s) */
@@ -372,14 +354,14 @@ idtg2_em_handler(struct rio_dev *rdev, u8 portnum)
 			/* Implementation Specific port error reported */
 
 			/* Get IS errors reported */
-			rio_mport_read_config_32(mport, destid, hopcount,
+			rio_read_config_32(rdev,
					IDT_PORT_ISERR_DET(portnum), &regval);
 
 			pr_debug("RIO: %s Implementation Specific Port" \
				 " errors 0x%x\n", rio_name(rdev), regval);
 
 			/* Clear all implementation specific events */
-			rio_mport_write_config_32(mport, destid, hopcount,
+			rio_write_config_32(rdev,
					IDT_PORT_ISERR_DET(portnum), 0);
 		}
 	}
@@ -391,14 +373,10 @@ static ssize_t
 idtg2_show_errlog(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct rio_dev *rdev = to_rio_dev(dev);
-	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
 	ssize_t len = 0;
 	u32 regval;
 
-	while (!rio_mport_read_config_32(mport, destid, hopcount,
-					 IDT_ERR_RD, &regval)) {
+	while (!rio_read_config_32(rdev, IDT_ERR_RD, &regval)) {
 		if (!regval)    /* 0 = end of log */
 			break;
 		len += snprintf(buf + len, PAGE_SIZE - len,
@@ -412,12 +390,12 @@ idtg2_show_errlog(struct device *dev, struct device_attribute *attr, char *buf)
 
 static DEVICE_ATTR(errlog, S_IRUGO, idtg2_show_errlog, NULL);
 
-static int idtg2_sysfs(struct rio_dev *rdev, int create)
+static int idtg2_sysfs(struct rio_dev *rdev, bool create)
 {
 	struct device *dev = &rdev->dev;
 	int err = 0;
 
-	if (create == RIO_SW_SYSFS_CREATE) {
+	if (create) {
 		/* Initialize sysfs entries */
 		err = device_create_file(dev, &dev_attr_errlog);
 		if (err)
@@ -428,20 +406,90 @@ static int idtg2_sysfs(struct rio_dev *rdev, int create)
 	return err;
 }
 
-static int idtg2_switch_init(struct rio_dev *rdev, int do_enum)
+static struct rio_switch_ops idtg2_switch_ops = {
+	.owner = THIS_MODULE,
+	.add_entry = idtg2_route_add_entry,
+	.get_entry = idtg2_route_get_entry,
+	.clr_table = idtg2_route_clr_table,
+	.set_domain = idtg2_set_domain,
+	.get_domain = idtg2_get_domain,
+	.em_init = idtg2_em_init,
+	.em_handle = idtg2_em_handler,
+};
+
+static int idtg2_probe(struct rio_dev *rdev, const struct rio_device_id *id)
 {
 	pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
 
-	rdev->rswitch->add_entry = idtg2_route_add_entry;
-	rdev->rswitch->get_entry = idtg2_route_get_entry;
-	rdev->rswitch->clr_table = idtg2_route_clr_table;
-	rdev->rswitch->set_domain = idtg2_set_domain;
-	rdev->rswitch->get_domain = idtg2_get_domain;
-	rdev->rswitch->em_init = idtg2_em_init;
-	rdev->rswitch->em_handle = idtg2_em_handler;
-	rdev->rswitch->sw_sysfs = idtg2_sysfs;
+	spin_lock(&rdev->rswitch->lock);
+
+	if (rdev->rswitch->ops) {
+		spin_unlock(&rdev->rswitch->lock);
+		return -EINVAL;
+	}
+
+	rdev->rswitch->ops = &idtg2_switch_ops;
+
+	if (rdev->do_enum) {
+		/* Ensure that default routing is disabled on startup */
+		rio_write_config_32(rdev,
+				    RIO_STD_RTE_DEFAULT_PORT, IDT_NO_ROUTE);
+	}
+
+	/* Create device-specific sysfs attributes */
+	idtg2_sysfs(rdev, true);
+
+	spin_unlock(&rdev->rswitch->lock);
 	return 0;
 }
 
-DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1848, idtg2_switch_init); 
-DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1616, idtg2_switch_init); +static void idtg2_remove(struct rio_dev *rdev) +{ +	pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); +	spin_lock(&rdev->rswitch->lock); +	if (rdev->rswitch->ops != &idtg2_switch_ops) { +		spin_unlock(&rdev->rswitch->lock); +		return; +	} +	rdev->rswitch->ops = NULL; + +	/* Remove device-specific sysfs attributes */ +	idtg2_sysfs(rdev, false); + +	spin_unlock(&rdev->rswitch->lock); +} + +static struct rio_device_id idtg2_id_table[] = { +	{RIO_DEVICE(RIO_DID_IDTCPS1848, RIO_VID_IDT)}, +	{RIO_DEVICE(RIO_DID_IDTCPS1616, RIO_VID_IDT)}, +	{RIO_DEVICE(RIO_DID_IDTVPS1616, RIO_VID_IDT)}, +	{RIO_DEVICE(RIO_DID_IDTSPS1616, RIO_VID_IDT)}, +	{RIO_DEVICE(RIO_DID_IDTCPS1432, RIO_VID_IDT)}, +	{ 0, }	/* terminate list */ +}; + +static struct rio_driver idtg2_driver = { +	.name = "idt_gen2", +	.id_table = idtg2_id_table, +	.probe = idtg2_probe, +	.remove = idtg2_remove, +}; + +static int __init idtg2_init(void) +{ +	return rio_register_driver(&idtg2_driver); +} + +static void __exit idtg2_exit(void) +{ +	pr_debug("RIO: %s\n", __func__); +	rio_unregister_driver(&idtg2_driver); +	pr_debug("RIO: %s done\n", __func__); +} + +device_initcall(idtg2_init); +module_exit(idtg2_exit); + +MODULE_DESCRIPTION("IDT CPS Gen.2 Serial RapidIO switch family driver"); +MODULE_AUTHOR("Integrated Device Technology, Inc."); +MODULE_LICENSE("GPL"); diff --git a/drivers/rapidio/switches/idtcps.c b/drivers/rapidio/switches/idtcps.c index fc9f6374f75..7fbb60d3179 100644 --- a/drivers/rapidio/switches/idtcps.c +++ b/drivers/rapidio/switches/idtcps.c @@ -13,6 +13,7 @@  #include <linux/rio.h>  #include <linux/rio_drv.h>  #include <linux/rio_ids.h> +#include <linux/module.h>  #include "../rio.h"  #define CPS_DEFAULT_ROUTE	0xde @@ -26,6 +27,9 @@ idtcps_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,  {  	u32 result; +	if (route_port == RIO_INVALID_ROUTE) +		route_port = CPS_DEFAULT_ROUTE; +  	if (table == RIO_GLOBAL_TABLE) {  		rio_mport_write_config_32(mport, destid, hopcount,  				RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid); @@ -115,33 +119,85 @@ idtcps_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,  	return 0;  } -static int idtcps_switch_init(struct rio_dev *rdev, int do_enum) +static struct rio_switch_ops idtcps_switch_ops = { +	.owner = THIS_MODULE, +	.add_entry = idtcps_route_add_entry, +	.get_entry = idtcps_route_get_entry, +	.clr_table = idtcps_route_clr_table, +	.set_domain = idtcps_set_domain, +	.get_domain = idtcps_get_domain, +	.em_init = NULL, +	.em_handle = NULL, +}; + +static int idtcps_probe(struct rio_dev *rdev, const struct rio_device_id *id)  { -	struct rio_mport *mport = rdev->net->hport; -	u16 destid = rdev->rswitch->destid; -	u8 hopcount = rdev->rswitch->hopcount; -  	pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); -	rdev->rswitch->add_entry = idtcps_route_add_entry; -	rdev->rswitch->get_entry = idtcps_route_get_entry; -	rdev->rswitch->clr_table = idtcps_route_clr_table; -	rdev->rswitch->set_domain = idtcps_set_domain; -	rdev->rswitch->get_domain = idtcps_get_domain; -	rdev->rswitch->em_init = NULL; -	rdev->rswitch->em_handle = NULL; - -	if (do_enum) { + +	spin_lock(&rdev->rswitch->lock); + +	if (rdev->rswitch->ops) { +		spin_unlock(&rdev->rswitch->lock); +		return -EINVAL; +	} + +	rdev->rswitch->ops = &idtcps_switch_ops; + +	if (rdev->do_enum) {  		/* set TVAL = ~50us */ -		rio_mport_write_config_32(mport, destid, hopcount, +		rio_write_config_32(rdev,  			rdev->phys_efptr + 
RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8); +		/* Ensure that default routing is disabled on startup */ +		rio_write_config_32(rdev, +				    RIO_STD_RTE_DEFAULT_PORT, CPS_NO_ROUTE);  	} +	spin_unlock(&rdev->rswitch->lock);  	return 0;  } -DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS6Q, idtcps_switch_init); -DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS8, idtcps_switch_init); -DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS10Q, idtcps_switch_init); -DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS12, idtcps_switch_init); -DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS16, idtcps_switch_init); -DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDT70K200, idtcps_switch_init); +static void idtcps_remove(struct rio_dev *rdev) +{ +	pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); +	spin_lock(&rdev->rswitch->lock); +	if (rdev->rswitch->ops != &idtcps_switch_ops) { +		spin_unlock(&rdev->rswitch->lock); +		return; +	} +	rdev->rswitch->ops = NULL; +	spin_unlock(&rdev->rswitch->lock); +} + +static struct rio_device_id idtcps_id_table[] = { +	{RIO_DEVICE(RIO_DID_IDTCPS6Q, RIO_VID_IDT)}, +	{RIO_DEVICE(RIO_DID_IDTCPS8, RIO_VID_IDT)}, +	{RIO_DEVICE(RIO_DID_IDTCPS10Q, RIO_VID_IDT)}, +	{RIO_DEVICE(RIO_DID_IDTCPS12, RIO_VID_IDT)}, +	{RIO_DEVICE(RIO_DID_IDTCPS16, RIO_VID_IDT)}, +	{RIO_DEVICE(RIO_DID_IDT70K200, RIO_VID_IDT)}, +	{ 0, }	/* terminate list */ +}; + +static struct rio_driver idtcps_driver = { +	.name = "idtcps", +	.id_table = idtcps_id_table, +	.probe = idtcps_probe, +	.remove = idtcps_remove, +}; + +static int __init idtcps_init(void) +{ +	return rio_register_driver(&idtcps_driver); +} + +static void __exit idtcps_exit(void) +{ +	rio_unregister_driver(&idtcps_driver); +} + +device_initcall(idtcps_init); +module_exit(idtcps_exit); + +MODULE_DESCRIPTION("IDT CPS Gen.1 Serial RapidIO switch family driver"); +MODULE_AUTHOR("Integrated Device Technology, Inc."); +MODULE_LICENSE("GPL"); diff --git a/drivers/rapidio/switches/tsi500.c b/drivers/rapidio/switches/tsi500.c deleted file mode 100644 index 914eddd5aa4..00000000000 --- a/drivers/rapidio/switches/tsi500.c +++ /dev/null @@ -1,78 +0,0 @@ -/* - * RapidIO Tsi500 switch support - * - * Copyright 2009-2010 Integrated Device Technology, Inc. - * Alexandre Bounine <alexandre.bounine@idt.com> - *  - Modified switch operations initialization. - * - * Copyright 2005 MontaVista Software, Inc. - * Matt Porter <mporter@kernel.crashing.org> - * - * This program is free software; you can redistribute  it and/or modify it - * under  the terms of  the GNU General  Public License as published by the - * Free Software Foundation;  either version 2 of the  License, or (at your - * option) any later version. 
- */ - -#include <linux/rio.h> -#include <linux/rio_drv.h> -#include <linux/rio_ids.h> -#include "../rio.h" - -static int -tsi500_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table, u16 route_destid, u8 route_port) -{ -	int i; -	u32 offset = 0x10000 + 0xa00 + ((route_destid / 2)&~0x3); -	u32 result; - -	if (table == 0xff) { -		rio_mport_read_config_32(mport, destid, hopcount, offset, &result); -		result &= ~(0xf << (4*(route_destid & 0x7))); -		for (i=0;i<4;i++) -			rio_mport_write_config_32(mport, destid, hopcount, offset + (0x20000*i), result | (route_port << (4*(route_destid & 0x7)))); -	} -	else { -		rio_mport_read_config_32(mport, destid, hopcount, offset + (0x20000*table), &result); -		result &= ~(0xf << (4*(route_destid & 0x7))); -		rio_mport_write_config_32(mport, destid, hopcount, offset + (0x20000*table), result | (route_port << (4*(route_destid & 0x7)))); -	} - -	return 0; -} - -static int -tsi500_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table, u16 route_destid, u8 *route_port) -{ -	int ret = 0; -	u32 offset = 0x10000 + 0xa00 + ((route_destid / 2)&~0x3); -	u32 result; - -	if (table == 0xff) -		rio_mport_read_config_32(mport, destid, hopcount, offset, &result); -	else -		rio_mport_read_config_32(mport, destid, hopcount, offset + (0x20000*table), &result); - -	result &= 0xf << (4*(route_destid & 0x7)); -	*route_port = result >> (4*(route_destid & 0x7)); -	if (*route_port > 3) -		ret = -1; - -	return ret; -} - -static int tsi500_switch_init(struct rio_dev *rdev, int do_enum) -{ -	pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); -	rdev->rswitch->add_entry = tsi500_route_add_entry; -	rdev->rswitch->get_entry = tsi500_route_get_entry; -	rdev->rswitch->clr_table = NULL; -	rdev->rswitch->set_domain = NULL; -	rdev->rswitch->get_domain = NULL; -	rdev->rswitch->em_init = NULL; -	rdev->rswitch->em_handle = NULL; - -	return 0; -} - -DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI500, tsi500_switch_init); diff --git a/drivers/rapidio/switches/tsi568.c b/drivers/rapidio/switches/tsi568.c index b9a389b9f81..8a43561b9d1 100644 --- a/drivers/rapidio/switches/tsi568.c +++ b/drivers/rapidio/switches/tsi568.c @@ -19,6 +19,7 @@  #include <linux/rio_drv.h>  #include <linux/rio_ids.h>  #include <linux/delay.h> +#include <linux/module.h>  #include "../rio.h"  /* Global (broadcast) route registers */ @@ -113,39 +114,86 @@ tsi568_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,  static int  tsi568_em_init(struct rio_dev *rdev)  { -	struct rio_mport *mport = rdev->net->hport; -	u16 destid = rdev->rswitch->destid; -	u8 hopcount = rdev->rswitch->hopcount;  	u32 regval;  	int portnum; -	pr_debug("TSI568 %s [%d:%d]\n", __func__, destid, hopcount); +	pr_debug("TSI568 %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount);  	/* Make sure that Port-Writes are disabled (for all ports) */  	for (portnum = 0;  	     portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) { -		rio_mport_read_config_32(mport, destid, hopcount, -				TSI568_SP_MODE(portnum), &regval); -		rio_mport_write_config_32(mport, destid, hopcount, -				TSI568_SP_MODE(portnum), -				regval | TSI568_SP_MODE_PW_DIS); +		rio_read_config_32(rdev, TSI568_SP_MODE(portnum), &regval); +		rio_write_config_32(rdev, TSI568_SP_MODE(portnum), +				    regval | TSI568_SP_MODE_PW_DIS);  	}  	return 0;  } -static int tsi568_switch_init(struct rio_dev *rdev, int do_enum) +static struct rio_switch_ops tsi568_switch_ops = { +	.owner = THIS_MODULE, +	.add_entry = tsi568_route_add_entry, +	
.get_entry = tsi568_route_get_entry, +	.clr_table = tsi568_route_clr_table, +	.set_domain = NULL, +	.get_domain = NULL, +	.em_init = tsi568_em_init, +	.em_handle = NULL, +}; + +static int tsi568_probe(struct rio_dev *rdev, const struct rio_device_id *id)  {  	pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); -	rdev->rswitch->add_entry = tsi568_route_add_entry; -	rdev->rswitch->get_entry = tsi568_route_get_entry; -	rdev->rswitch->clr_table = tsi568_route_clr_table; -	rdev->rswitch->set_domain = NULL; -	rdev->rswitch->get_domain = NULL; -	rdev->rswitch->em_init = tsi568_em_init; -	rdev->rswitch->em_handle = NULL; +	spin_lock(&rdev->rswitch->lock); + +	if (rdev->rswitch->ops) { +		spin_unlock(&rdev->rswitch->lock); +		return -EINVAL; +	} + +	rdev->rswitch->ops = &tsi568_switch_ops; +	spin_unlock(&rdev->rswitch->lock);  	return 0;  } -DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI568, tsi568_switch_init); +static void tsi568_remove(struct rio_dev *rdev) +{ +	pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); +	spin_lock(&rdev->rswitch->lock); +	if (rdev->rswitch->ops != &tsi568_switch_ops) { +		spin_unlock(&rdev->rswitch->lock); +		return; +	} +	rdev->rswitch->ops = NULL; +	spin_unlock(&rdev->rswitch->lock); +} + +static struct rio_device_id tsi568_id_table[] = { +	{RIO_DEVICE(RIO_DID_TSI568, RIO_VID_TUNDRA)}, +	{ 0, }	/* terminate list */ +}; + +static struct rio_driver tsi568_driver = { +	.name = "tsi568", +	.id_table = tsi568_id_table, +	.probe = tsi568_probe, +	.remove = tsi568_remove, +}; + +static int __init tsi568_init(void) +{ +	return rio_register_driver(&tsi568_driver); +} + +static void __exit tsi568_exit(void) +{ +	rio_unregister_driver(&tsi568_driver); +} + +device_initcall(tsi568_init); +module_exit(tsi568_exit); + +MODULE_DESCRIPTION("IDT Tsi568 Serial RapidIO switch driver"); +MODULE_AUTHOR("Integrated Device Technology, Inc."); +MODULE_LICENSE("GPL"); diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c index 2003fb63c40..42c8b014fe1 100644 --- a/drivers/rapidio/switches/tsi57x.c +++ b/drivers/rapidio/switches/tsi57x.c @@ -19,6 +19,7 @@  #include <linux/rio_drv.h>  #include <linux/rio_ids.h>  #include <linux/delay.h> +#include <linux/module.h>  #include "../rio.h"  /* Global (broadcast) route registers */ @@ -158,48 +159,45 @@ tsi57x_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,  static int  tsi57x_em_init(struct rio_dev *rdev)  { -	struct rio_mport *mport = rdev->net->hport; -	u16 destid = rdev->rswitch->destid; -	u8 hopcount = rdev->rswitch->hopcount;  	u32 regval;  	int portnum; -	pr_debug("TSI578 %s [%d:%d]\n", __func__, destid, hopcount); +	pr_debug("TSI578 %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount);  	for (portnum = 0;  	     portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) {  		/* Make sure that Port-Writes are enabled (for all ports) */ -		rio_mport_read_config_32(mport, destid, hopcount, +		rio_read_config_32(rdev,  				TSI578_SP_MODE(portnum), &regval); -		rio_mport_write_config_32(mport, destid, hopcount, +		rio_write_config_32(rdev,  				TSI578_SP_MODE(portnum),  				regval & ~TSI578_SP_MODE_PW_DIS);  		/* Clear all pending interrupts */ -		rio_mport_read_config_32(mport, destid, hopcount, +		rio_read_config_32(rdev,  				rdev->phys_efptr +  					RIO_PORT_N_ERR_STS_CSR(portnum),  				&regval); -		rio_mport_write_config_32(mport, destid, hopcount, +		rio_write_config_32(rdev,  				rdev->phys_efptr +  					RIO_PORT_N_ERR_STS_CSR(portnum),  				regval & 0x07120214); -		
rio_mport_read_config_32(mport, destid, hopcount, +		rio_read_config_32(rdev,  				TSI578_SP_INT_STATUS(portnum), &regval); -		rio_mport_write_config_32(mport, destid, hopcount, +		rio_write_config_32(rdev,  				TSI578_SP_INT_STATUS(portnum),  				regval & 0x000700bd);  		/* Enable all interrupts to allow ports to send a port-write */ -		rio_mport_read_config_32(mport, destid, hopcount, +		rio_read_config_32(rdev,  				TSI578_SP_CTL_INDEP(portnum), &regval); -		rio_mport_write_config_32(mport, destid, hopcount, +		rio_write_config_32(rdev,  				TSI578_SP_CTL_INDEP(portnum),  				regval | 0x000b0000);  		/* Skip next (odd) port if the current port is in x4 mode */ -		rio_mport_read_config_32(mport, destid, hopcount, +		rio_read_config_32(rdev,  				rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),  				&regval);  		if ((regval & RIO_PORT_N_CTL_PWIDTH) == RIO_PORT_N_CTL_PWIDTH_4) @@ -207,7 +205,7 @@ tsi57x_em_init(struct rio_dev *rdev)  	}  	/* set TVAL = ~50us */ -	rio_mport_write_config_32(mport, destid, hopcount, +	rio_write_config_32(rdev,  		rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x9a << 8);  	return 0; @@ -217,14 +215,12 @@ static int  tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)  {  	struct rio_mport *mport = rdev->net->hport; -	u16 destid = rdev->rswitch->destid; -	u8 hopcount = rdev->rswitch->hopcount;  	u32 intstat, err_status;  	int sendcount, checkcount;  	u8 route_port;  	u32 regval; -	rio_mport_read_config_32(mport, destid, hopcount, +	rio_read_config_32(rdev,  			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),  			&err_status); @@ -232,15 +228,15 @@ tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)  	    (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |  			  RIO_PORT_N_ERR_STS_PW_INP_ES))) {  		/* Remove any queued packets by locking/unlocking port */ -		rio_mport_read_config_32(mport, destid, hopcount, +		rio_read_config_32(rdev,  			rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),  			&regval);  		if (!(regval & RIO_PORT_N_CTL_LOCKOUT)) { -			rio_mport_write_config_32(mport, destid, hopcount, +			rio_write_config_32(rdev,  				rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),  				regval | RIO_PORT_N_CTL_LOCKOUT);  			udelay(50); -			rio_mport_write_config_32(mport, destid, hopcount, +			rio_write_config_32(rdev,  				rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),  				regval);  		} @@ -248,7 +244,7 @@ tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)  		/* Read from link maintenance response register to clear  		 * valid bit  		 */ -		rio_mport_read_config_32(mport, destid, hopcount, +		rio_read_config_32(rdev,  			rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(portnum),  			&regval); @@ -257,13 +253,12 @@ tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)  		 */  		sendcount = 3;  		while (sendcount) { -			rio_mport_write_config_32(mport, destid, hopcount, +			rio_write_config_32(rdev,  					  TSI578_SP_CS_TX(portnum), 0x40fc8000);  			checkcount = 3;  			while (checkcount--) {  				udelay(50); -				rio_mport_read_config_32( -					mport, destid, hopcount, +				rio_read_config_32(rdev,  					rdev->phys_efptr +  						RIO_PORT_N_MNT_RSP_CSR(portnum),  					&regval); @@ -277,44 +272,100 @@ tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)  exit_es:  	/* Clear implementation specific error status bits */ -	rio_mport_read_config_32(mport, destid, hopcount, -				 TSI578_SP_INT_STATUS(portnum), &intstat); +	rio_read_config_32(rdev, TSI578_SP_INT_STATUS(portnum), &intstat);  	pr_debug("TSI578[%x:%x] SP%d_INT_STATUS=0x%08x\n", -		 destid, hopcount, portnum, intstat); +		 rdev->destid, 
rdev->hopcount, portnum, intstat);  	if (intstat & 0x10000) { -		rio_mport_read_config_32(mport, destid, hopcount, +		rio_read_config_32(rdev,  				TSI578_SP_LUT_PEINF(portnum), &regval);  		regval = (mport->sys_size) ? (regval >> 16) : (regval >> 24);  		route_port = rdev->rswitch->route_table[regval];  		pr_debug("RIO: TSI578[%s] P%d LUT Parity Error (destID=%d)\n",  			rio_name(rdev), portnum, regval); -		tsi57x_route_add_entry(mport, destid, hopcount, +		tsi57x_route_add_entry(mport, rdev->destid, rdev->hopcount,  				RIO_GLOBAL_TABLE, regval, route_port);  	} -	rio_mport_write_config_32(mport, destid, hopcount, -				  TSI578_SP_INT_STATUS(portnum), -				  intstat & 0x000700bd); +	rio_write_config_32(rdev, TSI578_SP_INT_STATUS(portnum), +			    intstat & 0x000700bd);  	return 0;  } -static int tsi57x_switch_init(struct rio_dev *rdev, int do_enum) +static struct rio_switch_ops tsi57x_switch_ops = { +	.owner = THIS_MODULE, +	.add_entry = tsi57x_route_add_entry, +	.get_entry = tsi57x_route_get_entry, +	.clr_table = tsi57x_route_clr_table, +	.set_domain = tsi57x_set_domain, +	.get_domain = tsi57x_get_domain, +	.em_init = tsi57x_em_init, +	.em_handle = tsi57x_em_handler, +}; + +static int tsi57x_probe(struct rio_dev *rdev, const struct rio_device_id *id)  {  	pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); -	rdev->rswitch->add_entry = tsi57x_route_add_entry; -	rdev->rswitch->get_entry = tsi57x_route_get_entry; -	rdev->rswitch->clr_table = tsi57x_route_clr_table; -	rdev->rswitch->set_domain = tsi57x_set_domain; -	rdev->rswitch->get_domain = tsi57x_get_domain; -	rdev->rswitch->em_init = tsi57x_em_init; -	rdev->rswitch->em_handle = tsi57x_em_handler; +	spin_lock(&rdev->rswitch->lock); + +	if (rdev->rswitch->ops) { +		spin_unlock(&rdev->rswitch->lock); +		return -EINVAL; +	} +	rdev->rswitch->ops = &tsi57x_switch_ops; + +	if (rdev->do_enum) { +		/* Ensure that default routing is disabled on startup */ +		rio_write_config_32(rdev, RIO_STD_RTE_DEFAULT_PORT, +				    RIO_INVALID_ROUTE); +	} + +	spin_unlock(&rdev->rswitch->lock);  	return 0;  } -DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI572, tsi57x_switch_init); -DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI574, tsi57x_switch_init); -DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI577, tsi57x_switch_init); -DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI578, tsi57x_switch_init); +static void tsi57x_remove(struct rio_dev *rdev) +{ +	pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); +	spin_lock(&rdev->rswitch->lock); +	if (rdev->rswitch->ops != &tsi57x_switch_ops) { +		spin_unlock(&rdev->rswitch->lock); +		return; +	} +	rdev->rswitch->ops = NULL; +	spin_unlock(&rdev->rswitch->lock); +} + +static struct rio_device_id tsi57x_id_table[] = { +	{RIO_DEVICE(RIO_DID_TSI572, RIO_VID_TUNDRA)}, +	{RIO_DEVICE(RIO_DID_TSI574, RIO_VID_TUNDRA)}, +	{RIO_DEVICE(RIO_DID_TSI577, RIO_VID_TUNDRA)}, +	{RIO_DEVICE(RIO_DID_TSI578, RIO_VID_TUNDRA)}, +	{ 0, }	/* terminate list */ +}; + +static struct rio_driver tsi57x_driver = { +	.name = "tsi57x", +	.id_table = tsi57x_id_table, +	.probe = tsi57x_probe, +	.remove = tsi57x_remove, +}; + +static int __init tsi57x_init(void) +{ +	return rio_register_driver(&tsi57x_driver); +} + +static void __exit tsi57x_exit(void) +{ +	rio_unregister_driver(&tsi57x_driver); +} + +device_initcall(tsi57x_init); +module_exit(tsi57x_exit); + +MODULE_DESCRIPTION("IDT Tsi57x Serial RapidIO switch family driver"); +MODULE_AUTHOR("Integrated Device Technology, Inc."); +MODULE_LICENSE("GPL");
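
All four switch drivers in this patch now share one registration pattern: a
static rio_switch_ops table is bound to the switch under rswitch->lock in
probe() and unbound in remove(), with probe() returning -EINVAL if another
driver's ops are already attached. The following minimal sketch distills
that pattern using only the interfaces the patch introduces; the example_*
names and the RIO_DID_EXAMPLE device ID are hypothetical placeholders, not
part of the patch.

	#include <linux/rio.h>
	#include <linux/rio_drv.h>
	#include <linux/rio_ids.h>
	#include <linux/module.h>

	#define RIO_DID_EXAMPLE	0xffff	/* hypothetical device ID for this sketch */

	static struct rio_switch_ops example_switch_ops = {
		.owner = THIS_MODULE,
		.add_entry = NULL,	/* a real driver supplies its routing ops */
		.get_entry = NULL,
		.clr_table = NULL,
		.set_domain = NULL,
		.get_domain = NULL,
		.em_init = NULL,	/* optional error-management hooks */
		.em_handle = NULL,
	};

	static int example_probe(struct rio_dev *rdev, const struct rio_device_id *id)
	{
		/* Bind ops under the per-switch lock; only one driver may own them */
		spin_lock(&rdev->rswitch->lock);
		if (rdev->rswitch->ops) {
			spin_unlock(&rdev->rswitch->lock);
			return -EINVAL;
		}
		rdev->rswitch->ops = &example_switch_ops;
		spin_unlock(&rdev->rswitch->lock);
		return 0;
	}

	static void example_remove(struct rio_dev *rdev)
	{
		/* Unbind only if our ops are still the ones attached */
		spin_lock(&rdev->rswitch->lock);
		if (rdev->rswitch->ops == &example_switch_ops)
			rdev->rswitch->ops = NULL;
		spin_unlock(&rdev->rswitch->lock);
	}

	static struct rio_device_id example_id_table[] = {
		{RIO_DEVICE(RIO_DID_EXAMPLE, RIO_VID_TUNDRA)},
		{ 0, }	/* terminate list */
	};

	static struct rio_driver example_driver = {
		.name = "example_switch",
		.id_table = example_id_table,
		.probe = example_probe,
		.remove = example_remove,
	};

	static int __init example_init(void)
	{
		return rio_register_driver(&example_driver);
	}

	static void __exit example_exit(void)
	{
		rio_unregister_driver(&example_driver);
	}

	device_initcall(example_init);
	module_exit(example_exit);

	MODULE_LICENSE("GPL");

As in the drivers above, the sketch uses device_initcall() rather than
module_init() for the built-in case, and serializes the ops assignment with
rswitch->lock so a second switch driver cannot bind to the same device.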
